  ret <4 x i8> %f
}

; Undef handling
; All operands undef: the fold is left to the optimizer; no simplification expected.
define i32 @fshl_scalar_all_undef() {
; CHECK-LABEL: @fshl_scalar_all_undef(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 undef)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 undef)
  ret i32 %f
}

; fshr counterpart of the all-undef case.
define i32 @fshr_scalar_all_undef() {
; CHECK-LABEL: @fshr_scalar_all_undef(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 undef)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 undef)
  ret i32 %f
}

; Undef shift amount with defined data operands.
define i32 @fshl_scalar_undef_shamt() {
; CHECK-LABEL: @fshl_scalar_undef_shamt(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 1, i32 2, i32 undef)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshl.i32(i32 1, i32 2, i32 undef)
  ret i32 %f
}

; fshr counterpart: undef shift amount with defined data operands.
define i32 @fshr_scalar_undef_shamt() {
; CHECK-LABEL: @fshr_scalar_undef_shamt(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 1, i32 2, i32 undef)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshr.i32(i32 1, i32 2, i32 undef)
  ret i32 %f
}

; Both data operands undef, defined (non-zero modulo width) shift amount.
define i32 @fshl_scalar_undef_ops() {
; CHECK-LABEL: @fshl_scalar_undef_ops(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 7)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 7)
  ret i32 %f
}

; fshr counterpart: both data operands undef, defined shift amount.
define i32 @fshr_scalar_undef_ops() {
; CHECK-LABEL: @fshr_scalar_undef_ops(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 7)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 7)
  ret i32 %f
}

; Zero shift: fshl returns op0, which is undef here.
define i32 @fshl_scalar_undef_op1_zero_shift() {
; CHECK-LABEL: @fshl_scalar_undef_op1_zero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 1, i32 0)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshl.i32(i32 undef, i32 1, i32 0)
  ret i32 %f
}

; Shift of 32 is 0 modulo the 32-bit width, so only op0 (defined) matters.
define i32 @fshl_scalar_undef_op2_zero_shift() {
; CHECK-LABEL: @fshl_scalar_undef_op2_zero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 1, i32 undef, i32 32)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshl.i32(i32 1, i32 undef, i32 32)
  ret i32 %f
}

; Shift of 64 is 0 modulo the 32-bit width; fshr with zero shift returns op1 (defined).
define i32 @fshr_scalar_undef_op1_zero_shift() {
; CHECK-LABEL: @fshr_scalar_undef_op1_zero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 1, i32 64)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshr.i32(i32 undef, i32 1, i32 64)
  ret i32 %f
}

; Zero shift: fshr returns op1, which is undef here.
define i32 @fshr_scalar_undef_op2_zero_shift() {
; CHECK-LABEL: @fshr_scalar_undef_op2_zero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 1, i32 undef, i32 0)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshr.i32(i32 1, i32 undef, i32 0)
  ret i32 %f
}

; Non-zero shift pulls bits from both operands; op0 is undef.
define i32 @fshl_scalar_undef_op1_nonzero_shift() {
; CHECK-LABEL: @fshl_scalar_undef_op1_nonzero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 -1, i32 8)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshl.i32(i32 undef, i32 -1, i32 8)
  ret i32 %f
}

; Non-zero shift pulls bits from both operands; op1 is undef.
define i32 @fshl_scalar_undef_op2_nonzero_shift() {
; CHECK-LABEL: @fshl_scalar_undef_op2_nonzero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 -1, i32 undef, i32 8)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshl.i32(i32 -1, i32 undef, i32 8)
  ret i32 %f
}

; fshr counterpart: non-zero shift, undef op0.
define i32 @fshr_scalar_undef_op1_nonzero_shift() {
; CHECK-LABEL: @fshr_scalar_undef_op1_nonzero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 -1, i32 8)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshr.i32(i32 undef, i32 -1, i32 8)
  ret i32 %f
}

; fshr counterpart: non-zero shift, undef op1.
define i32 @fshr_scalar_undef_op2_nonzero_shift() {
; CHECK-LABEL: @fshr_scalar_undef_op2_nonzero_shift(
; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 -1, i32 undef, i32 8)
; CHECK-NEXT:    ret i32 [[F]]
;
  %f = call i32 @llvm.fshr.i32(i32 -1, i32 undef, i32 8)
  ret i32 %f
}

; Per-lane mix — Undef/Undef/Undef; 1/2/Undef; Undef/Undef/3; Undef/1/0
define <4 x i8> @fshl_vector_mix1() {
; CHECK-LABEL: @fshl_vector_mix1(
; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
; CHECK-NEXT:    ret <4 x i8> [[F]]
;
  %f = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
  ret <4 x i8> %f
}

; Per-lane mix — 1/Undef/8; Undef/-1/2; -1/Undef/2; 7/8/4
define <4 x i8> @fshl_vector_mix2() {
; CHECK-LABEL: @fshl_vector_mix2(
; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
; CHECK-NEXT:    ret <4 x i8> [[F]]
;
  %f = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
  ret <4 x i8> %f
}

; Per-lane mix (fshr) — Undef/Undef/Undef; 1/2/Undef; Undef/Undef/3; Undef/1/0
define <4 x i8> @fshr_vector_mix1() {
; CHECK-LABEL: @fshr_vector_mix1(
; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
; CHECK-NEXT:    ret <4 x i8> [[F]]
;
  %f = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
  ret <4 x i8> %f
}

; Per-lane mix (fshr) — 1/Undef/8; Undef/-1/2; -1/Undef/2; 7/8/4
define <4 x i8> @fshr_vector_mix2() {
; CHECK-LABEL: @fshr_vector_mix2(
; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
; CHECK-NEXT:    ret <4 x i8> [[F]]
;
  %f = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
  ret <4 x i8> %f
}