@@ -237,6 +237,150 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     val
 }
 
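+/// Lower `va_arg` for 32-bit PowerPC targets, which use the register-save-area
+/// style `va_list` described by the struct below.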
+fn emit_powerpc_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
+    // struct __va_list_tag {
+    //     unsigned char gpr;
+    //     unsigned char fpr;
+    //     unsigned short reserved;
+    //     void *overflow_arg_area;
+    //     void *reg_save_area;
+    // };
+    let va_list_addr = list.immediate();
+
+    // Peel off any newtype wrappers.
+    let layout = {
+        let mut layout = bx.cx.layout_of(target_ty);
+
+        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
+            layout = inner;
+        }
+
+        layout
+    };
+
+    // Rust does not currently support any powerpc softfloat targets.
+    let target = &bx.cx.tcx.sess.target;
+    let is_soft_float_abi = target.abi == "softfloat";
+    assert!(!is_soft_float_abi);
+
+    // All instances of VaArgSafe are passed directly.
+    let is_indirect = false;
+
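+    // Classify the scalar: integers and pointers are passed in GPRs, floats in FPRs.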
+    let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
+            rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false),
+            rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64),
+            rustc_abi::Primitive::Pointer(_) => (false, true, false),
+        },
+        _ => unreachable!("all instances of VaArgSafe are represented as scalars"),
+    };
+
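+    // `gpr` counts the GPR argument slots (r3..r10) already consumed, `fpr` the
+    // FPR slots (f1..f8); pick whichever counter applies to this argument class.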
+    let num_regs_addr = if is_int || is_soft_float_abi {
+        va_list_addr // gpr
+    } else {
+        bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr
+    };
+
+    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi);
+
+    // "Align" the register count when the type is passed as `i64`.
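+    // (A 64-bit value occupies an aligned pair of GPRs, so round the count up to even.)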
+    if is_i64 || (is_f64 && is_soft_float_abi) {
+        num_regs = bx.add(num_regs, bx.const_u8(1));
+        num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110));
+    }
+
+    let max_regs = 8u8;
+    let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));
+
+    let in_reg = bx.append_sibling_block("va_arg.in_reg");
+    let in_mem = bx.append_sibling_block("va_arg.in_mem");
+    let end = bx.append_sibling_block("va_arg.end");
+
+    bx.cond_br(use_regs, in_reg, in_mem);
+
+    let reg_addr = {
+        bx.switch_to_block(in_reg);
+
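+        // `reg_save_area` sits at offset 8: gpr (1) + fpr (1) + reserved (2) +
+        // the 4-byte `overflow_arg_area` pointer.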
+        let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
+        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi);
+
+        // Floating-point registers start after the general-purpose registers.
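+        // (The GPR save area holds 8 registers of 4 bytes each, hence the 32-byte offset.)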
+        if !is_int && !is_soft_float_abi {
+            reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32))
+        }
+
+        // Get the address of the saved value by scaling the number of
+        // registers we've used by the size of each register.
+        let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 };
+        let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size));
+        let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset);
+
+        // Increase the used-register count.
+        let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
+        let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
+        bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi);
+
+        bx.br(end);
+
+        reg_addr
+    };
+
+    let mem_addr = {
+        bx.switch_to_block(in_mem);
+
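+        // Mark the registers as exhausted so that subsequent `va_arg`s of this class
+        // also take the overflow path.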
+        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi);
+
+        // Everything in the overflow area is rounded up to a size of at least 4.
+        let overflow_area_align = Align::from_bytes(4).unwrap();
+
+        let size = if !is_indirect {
+            layout.layout.size.align_to(overflow_area_align)
+        } else {
+            dl.pointer_size
+        };
+
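+        // `overflow_arg_area` sits at offset 4: gpr (1) + fpr (1) + reserved (2).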
+        let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
+        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi);
+
+        // Round up the address of the argument to its required alignment.
+        if layout.layout.align.abi > overflow_area_align {
+            overflow_area = round_pointer_up_to_alignment(
+                bx,
+                overflow_area,
+                layout.layout.align.abi,
+                bx.type_ptr(),
+            );
+        }
+
+        let mem_addr = overflow_area;
+
+        // Increase the overflow area.
+        overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
+        bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi);
+
+        bx.br(end);
+
+        mem_addr
+    };
+
+    // Return the appropriate result.
+    bx.switch_to_block(end);
+    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_type = layout.llvm_type(bx);
+    let val_addr = if is_indirect {
+        bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi)
+    } else {
+        val_addr
+    };
+    bx.load(val_type, val_addr, layout.align.abi)
+}
+
 fn emit_s390x_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
@@ -773,6 +917,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
         }
         "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
         "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
+        "powerpc" => emit_powerpc_va_arg(bx, addr, target_ty),
         "powerpc64" | "powerpc64le" => emit_ptr_va_arg(
             bx,
             addr,