; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
;
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled \
; RUN: %s -o - --verify-machineinstrs | FileCheck %s --check-prefix=ENABLED
;
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled \
; RUN: -arm-loloops-disable-tailpred %s -o - --verify-machineinstrs | \
; RUN: FileCheck %s --check-prefix=DISABLED

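; Both RUN lines compile the same function. The ENABLED prefix checks that the
; inner loop becomes a tail-predicated hardware loop (dlstp.32/letp), while the
; DISABLED prefix checks that -arm-loloops-disable-tailpred keeps it as a plain
; low-overhead loop (dls/le) with explicit vctp.32/vpst predication.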
define dso_local void @check_option(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
; ENABLED-LABEL: check_option:
; ENABLED: @ %bb.0: @ %entry
; ENABLED-NEXT: push.w {r4, r5, r6, r7, r8, lr}
; ENABLED-NEXT: cmp r3, #1
; ENABLED-NEXT: blt .LBB0_4
; ENABLED-NEXT: @ %bb.1: @ %vector.ph.preheader
; ENABLED-NEXT: .LBB0_2: @ %vector.ph
; ENABLED-NEXT: @ =>This Loop Header: Depth=1
; ENABLED-NEXT: @ Child Loop BB0_3 Depth 2
; ENABLED-NEXT: mov r8, r0
; ENABLED-NEXT: mov r4, r2
; ENABLED-NEXT: mov r5, r1
; ENABLED-NEXT: mov r6, r3
; ENABLED-NEXT: dlstp.32 lr, r6
; ENABLED-NEXT: .LBB0_3: @ %vector.body
; ENABLED-NEXT: @ Parent Loop BB0_2 Depth=1
; ENABLED-NEXT: @ => This Inner Loop Header: Depth=2
; ENABLED-NEXT: vldrw.u32 q0, [r5], #16
; ENABLED-NEXT: vldrw.u32 q1, [r4], #16
; ENABLED-NEXT: vadd.i32 q0, q1, q0
; ENABLED-NEXT: vstrw.32 q0, [r8], #16
; ENABLED-NEXT: letp lr, .LBB0_3
; ENABLED-NEXT: b .LBB0_2
; ENABLED-NEXT: .LBB0_4: @ %for.cond.cleanup
; ENABLED-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
;
; DISABLED-LABEL: check_option:
; DISABLED: @ %bb.0: @ %entry
; DISABLED-NEXT: push.w {r4, r5, r6, r7, r8, lr}
; DISABLED-NEXT: cmp r3, #1
; DISABLED-NEXT: blt .LBB0_4
; DISABLED-NEXT: @ %bb.1: @ %vector.ph.preheader
; DISABLED-NEXT: adds r6, r3, #3
; DISABLED-NEXT: movs r5, #1
; DISABLED-NEXT: bic r6, r6, #3
; DISABLED-NEXT: subs r6, #4
; DISABLED-NEXT: add.w r12, r5, r6, lsr #2
; DISABLED-NEXT: .LBB0_2: @ %vector.ph
; DISABLED-NEXT: @ =>This Loop Header: Depth=1
; DISABLED-NEXT: @ Child Loop BB0_3 Depth 2
; DISABLED-NEXT: mov r7, r12
; DISABLED-NEXT: mov r8, r0
; DISABLED-NEXT: mov r4, r2
; DISABLED-NEXT: mov r5, r1
; DISABLED-NEXT: mov r6, r3
; DISABLED-NEXT: dls lr, r12
; DISABLED-NEXT: .LBB0_3: @ %vector.body
; DISABLED-NEXT: @ Parent Loop BB0_2 Depth=1
; DISABLED-NEXT: @ => This Inner Loop Header: Depth=2
; DISABLED-NEXT: mov lr, r7
; DISABLED-NEXT: vctp.32 r6
; DISABLED-NEXT: subs r7, #1
; DISABLED-NEXT: subs r6, #4
; DISABLED-NEXT: vpstt
; DISABLED-NEXT: vldrwt.u32 q0, [r5], #16
; DISABLED-NEXT: vldrwt.u32 q1, [r4], #16
; DISABLED-NEXT: vadd.i32 q0, q1, q0
; DISABLED-NEXT: vpst
; DISABLED-NEXT: vstrwt.32 q0, [r8], #16
; DISABLED-NEXT: le lr, .LBB0_3
; DISABLED-NEXT: b .LBB0_2
; DISABLED-NEXT: .LBB0_4: @ %for.cond.cleanup
; DISABLED-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
entry:
  %cmp8 = icmp sgt i32 %N, 0
  %0 = add i32 %N, 3
  %1 = lshr i32 %0, 2
  %2 = shl nuw i32 %1, 2
  %3 = add i32 %2, -4
  %4 = lshr i32 %3, 2
  %5 = add nuw nsw i32 %4, 1
  br i1 %cmp8, label %vector.ph, label %for.cond.cleanup

vector.ph: ; preds = %entry
  %trip.count.minus.1 = add i32 %N, -1
  call void @llvm.set.loop.iterations.i32(i32 %5)
  br label %vector.body

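; The loop below carries a hardware-loop counter (llvm.set.loop.iterations /
; llvm.loop.decrement.reg) together with a get.active.lane.mask that predicates
; the masked loads and store; this is the shape the backend can turn into a
; tail-predicated loop unless that is disabled on the command line.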
vector.body: ; preds = %vector.body, %vector.ph
  %lsr.iv17 = phi i32* [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %vector.ph ]
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = phi i32 [ %5, %vector.ph ], [ %8, %vector.body ]

  %lsr.iv13 = bitcast i32* %lsr.iv to <4 x i32>*
  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
  %lsr.iv1719 = bitcast i32* %lsr.iv17 to <4 x i32>*

  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)

  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv13, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %7 = add nsw <4 x i32> %wide.masked.load12, %wide.masked.load
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %7, <4 x i32>* %lsr.iv1719, i32 4, <4 x i1> %active.lane.mask)
  %index.next = add i32 %index, 4
  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
  %scevgep18 = getelementptr i32, i32* %lsr.iv17, i32 4
  %8 = call i32 @llvm.loop.decrement.reg.i32(i32 %6, i32 1)
  %9 = icmp ne i32 %8, 0
  ;br i1 %9, label %vector.body, label %for.cond.cleanup
  br i1 %9, label %vector.body, label %vector.ph
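; The back-edge above goes to %vector.ph rather than %for.cond.cleanup (the
; commented-out branch shows the straight-line exit), so %vector.body becomes
; the inner loop of a two-deep nest, matching the Depth=1/Depth=2 annotations
; in the checks above.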

for.cond.cleanup: ; preds = %vector.body, %entry
  ret void
}

declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)