@@ -62,7 +62,6 @@ def infallible(self) -> bool:
         return not self.error_with_pop and not self.error_without_pop
 
 
-
 SKIP_PROPERTIES = Properties(
     escapes=False,
     error_with_pop=False,
@@ -99,7 +98,6 @@ def properties(self) -> Properties:
 
 
 class Flush:
-
     @property
     def properties(self) -> Properties:
         return SKIP_PROPERTIES
@@ -112,6 +110,7 @@ def name(self) -> str:
     def size(self) -> int:
         return 0
 
+
 @dataclass
 class StackItem:
     name: str
@@ -133,6 +132,7 @@ def is_array(self) -> bool:
     def get_size(self) -> str:
         return self.size if self.size else "1"
 
+
 @dataclass
 class StackEffect:
     inputs: list[StackItem]
@@ -150,6 +150,7 @@ class CacheEntry:
     def __str__(self) -> str:
         return f"{self.name}/{self.size}"
 
+
 @dataclass
 class Uop:
     name: str
@@ -163,7 +164,7 @@ class Uop:
     _size: int = -1
     implicitly_created: bool = False
     replicated = 0
-    replicates : "Uop | None" = None
+    replicates: "Uop | None" = None
 
     def dump(self, indent: str) -> None:
         print(
@@ -308,19 +309,26 @@ def override_error(
     )
 
 
-def convert_stack_item(item: parser.StackEffect, replace_op_arg_1: str | None) -> StackItem:
+def convert_stack_item(
+    item: parser.StackEffect, replace_op_arg_1: str | None
+) -> StackItem:
     cond = item.cond
     if replace_op_arg_1 and OPARG_AND_1.match(item.cond):
         cond = replace_op_arg_1
-    return StackItem(
-        item.name, item.type, cond, item.size
-    )
+    return StackItem(item.name, item.type, cond, item.size)
+
 
-def analyze_stack(op: parser.InstDef | parser.Pseudo, replace_op_arg_1: str | None = None) -> StackEffect:
+def analyze_stack(
+    op: parser.InstDef | parser.Pseudo, replace_op_arg_1: str | None = None
+) -> StackEffect:
     inputs: list[StackItem] = [
-        convert_stack_item(i, replace_op_arg_1) for i in op.inputs if isinstance(i, parser.StackEffect)
+        convert_stack_item(i, replace_op_arg_1)
+        for i in op.inputs
+        if isinstance(i, parser.StackEffect)
+    ]
+    outputs: list[StackItem] = [
+        convert_stack_item(i, replace_op_arg_1) for i in op.outputs
     ]
-    outputs: list[StackItem] = [convert_stack_item(i, replace_op_arg_1) for i in op.outputs]
     # Mark variables with matching names at the base of the stack as "peek"
     modified = False
     for input, output in zip(inputs, outputs):
@@ -331,9 +339,11 @@ def analyze_stack(op: parser.InstDef | parser.Pseudo, replace_op_arg_1: str | No
     if isinstance(op, parser.InstDef):
         output_names = [out.name for out in outputs]
         for input in inputs:
-            if (variable_used(op, input.name) or
-                variable_used(op, "DECREF_INPUTS") or
-                (not input.peek and input.name in output_names)):
+            if (
+                variable_used(op, input.name)
+                or variable_used(op, "DECREF_INPUTS")
+                or (not input.peek and input.name in output_names)
+            ):
                 input.used = True
         for output in outputs:
             if variable_used(op, output.name):
@@ -359,9 +369,9 @@ def analyze_deferred_refs(node: parser.InstDef) -> dict[lexer.Token, str | None]
     def find_assignment_target(idx: int) -> list[lexer.Token]:
         """Find the tokens that make up the left-hand side of an assignment"""
         offset = 1
-        for tkn in reversed(node.block.tokens[:idx - 1]):
+        for tkn in reversed(node.block.tokens[: idx - 1]):
             if tkn.kind == "SEMI" or tkn.kind == "LBRACE" or tkn.kind == "RBRACE":
-                return node.block.tokens[idx - offset: idx - 1]
+                return node.block.tokens[idx - offset : idx - 1]
             offset += 1
         return []
 
@@ -370,42 +380,54 @@ def find_assignment_target(idx: int) -> list[lexer.Token]:
         if tkn.kind != "IDENTIFIER" or tkn.text != "PyStackRef_FromPyObjectNew":
             continue
 
-        if idx == 0 or node.block.tokens[idx-1].kind != "EQUALS":
+        if idx == 0 or node.block.tokens[idx - 1].kind != "EQUALS":
             raise analysis_error("Expected '=' before PyStackRef_FromPyObjectNew", tkn)
 
         lhs = find_assignment_target(idx)
         if len(lhs) == 0:
-            raise analysis_error("PyStackRef_FromPyObjectNew() must be assigned to an output", tkn)
+            raise analysis_error(
+                "PyStackRef_FromPyObjectNew() must be assigned to an output", tkn
+            )
 
-        if lhs[0].kind == "TIMES" or any(t.kind == "ARROW" or t.kind == "LBRACKET" for t in lhs[1:]):
+        if lhs[0].kind == "TIMES" or any(
+            t.kind == "ARROW" or t.kind == "LBRACKET" for t in lhs[1:]
+        ):
             # Don't handle: *ptr = ..., ptr->field = ..., or ptr[field] = ...
             # Assume that they are visible to the GC.
             refs[tkn] = None
             continue
 
         if len(lhs) != 1 or lhs[0].kind != "IDENTIFIER":
-            raise analysis_error("PyStackRef_FromPyObjectNew() must be assigned to an output", tkn)
+            raise analysis_error(
+                "PyStackRef_FromPyObjectNew() must be assigned to an output", tkn
+            )
 
         name = lhs[0].text
         if not any(var.name == name for var in node.outputs):
-            raise analysis_error(f"PyStackRef_FromPyObjectNew() must be assigned to an output, not '{name}'", tkn)
+            raise analysis_error(
+                f"PyStackRef_FromPyObjectNew() must be assigned to an output, not '{name}'",
+                tkn,
+            )
 
         refs[tkn] = name
 
     return refs
 
+
 def variable_used(node: parser.InstDef, name: str) -> bool:
     """Determine whether a variable with a given name is used in a node."""
     return any(
         token.kind == "IDENTIFIER" and token.text == name for token in node.block.tokens
     )
 
+
 def oparg_used(node: parser.InstDef) -> bool:
     """Determine whether `oparg` is used in a node."""
     return any(
         token.kind == "IDENTIFIER" and token.text == "oparg" for token in node.tokens
     )
 
+
 def tier_variable(node: parser.InstDef) -> int | None:
     """Determine whether a tier variable is used in a node."""
     for token in node.tokens:
@@ -416,6 +438,7 @@ def tier_variable(node: parser.InstDef) -> int | None:
                 return int(token.text[-1])
     return None
 
+
 def has_error_with_pop(op: parser.InstDef) -> bool:
     return (
         variable_used(op, "ERROR_IF")
@@ -424,6 +447,7 @@ def has_error_with_pop(op: parser.InstDef) -> bool:
         or variable_used(op, "resume_with_error")
     )
 
+
 def has_error_without_pop(op: parser.InstDef) -> bool:
     return (
         variable_used(op, "ERROR_NO_POP")
@@ -606,8 +630,10 @@ def stack_effect_only_peeks(instr: parser.InstDef) -> bool:
         for s, other in zip(stack_inputs, instr.outputs)
     )
 
+
 OPARG_AND_1 = re.compile("\\(*oparg *& *1")
 
+
 def effect_depends_on_oparg_1(op: parser.InstDef) -> bool:
     for effect in op.inputs:
         if isinstance(effect, parser.CacheEffect):
@@ -623,6 +649,7 @@ def effect_depends_on_oparg_1(op: parser.InstDef) -> bool:
             return True
     return False
 
+
 def compute_properties(op: parser.InstDef) -> Properties:
     has_free = (
         variable_used(op, "PyCell_New")
@@ -667,7 +694,12 @@ def compute_properties(op: parser.InstDef) -> Properties:
     )
 
 
-def make_uop(name: str, op: parser.InstDef, inputs: list[parser.InputEffect], uops: dict[str, Uop]) -> Uop:
+def make_uop(
+    name: str,
+    op: parser.InstDef,
+    inputs: list[parser.InputEffect],
+    uops: dict[str, Uop],
+) -> Uop:
     result = Uop(
         name=name,
         context=op.context,
@@ -685,7 +717,9 @@ def make_uop(name: str, op: parser.InstDef, inputs: list[parser.InputEffect], uo
             properties = compute_properties(op)
             if properties.oparg:
                 # May not need oparg anymore
-                properties.oparg = any(token.text == "oparg" for token in op.block.tokens)
+                properties.oparg = any(
+                    token.text == "oparg" for token in op.block.tokens
+                )
             rep = Uop(
                 name=name_x,
                 context=op.context,
@@ -736,8 +770,10 @@ def add_op(op: parser.InstDef, uops: dict[str, Uop]) -> None:
 
 
 def add_instruction(
-    where: lexer.Token, name: str, parts: list[Part],
-    instructions: dict[str, Instruction]
+    where: lexer.Token,
+    name: str,
+    parts: list[Part],
+    instructions: dict[str, Instruction],
 ) -> None:
     instructions[name] = Instruction(where, name, parts, None)
 
@@ -781,7 +817,9 @@ def add_macro(
                     parts.append(Flush())
                 else:
                     if part.name not in uops:
-                        raise analysis_error(f"No Uop named {part.name}", macro.tokens[0])
+                        raise analysis_error(
+                            f"No Uop named {part.name}", macro.tokens[0]
+                        )
                     parts.append(uops[part.name])
             case parser.CacheEffect():
                 parts.append(Skip(part.size))
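For reference, a minimal sketch of what the OPARG_AND_1 pattern above accepts, using only the standard re module; the condition strings below are made-up examples for illustration and are not taken from the bytecode DSL files.

import re

# Pattern as it appears in analyzer.py: optional "(" prefixes, then "oparg", "&", "1".
OPARG_AND_1 = re.compile("\\(*oparg *& *1")

# Hypothetical condition strings, for illustration only.
for cond in ("oparg & 1", "(oparg & 1)", "oparg&1", "oparg & 2"):
    print(cond, "->", bool(OPARG_AND_1.match(cond)))
# The first three match; "oparg & 2" does not.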