@@ -283,7 +283,7 @@ def train(args, data, tokenizer, use_wandb=False):
         criterion=criterion,
         tokenizer=tokenizer,
         device=device,
-        max_answer_length=30
+        max_answer_length=100000000000
     )
 
     # Print detailed metrics
@@ -299,21 +299,21 @@ def train(args, data, tokenizer, use_wandb=False):
     print(f"Recall: {eval_metrics['recall']:.4f}")
     print(f"F1: {eval_metrics['f1']:.4f}")
 
-    # print("\nDetailed Token Statistics:")
-    # print(f"True Positives: {eval_metrics['true_positives']}")
-    # print(f"False Positives: {eval_metrics['false_positives']}")
-    # print(f"False Negatives: {eval_metrics['false_negatives']}")
+    print("\nDetailed Token Statistics:")
+    print(f"True Positives: {eval_metrics['true_positives']}")
+    print(f"False Positives: {eval_metrics['false_positives']}")
+    print(f"False Negatives: {eval_metrics['false_negatives']}")
 
-    # print("\nOverprediction Analysis:")
-    # print(f"Total Questions: {eval_metrics['total_questions']}")
-    # print(f"Overprediction Cases: {eval_metrics['overprediction_cases']}")
-    # if eval_metrics['overprediction_cases'] > 0:
-    #     overpred_percentage = (eval_metrics['overprediction_cases'] / eval_metrics['total_questions']) * 100
-    #     print(f"Overprediction Percentage: {overpred_percentage:.1f}%")
-    #     print(f"Average Predicted Length: {eval_metrics['avg_overprediction_length']:.1f} tokens")
-    #     print(f"Average True Length: {eval_metrics['avg_true_length']:.1f} tokens")
-    #     avg_extra = eval_metrics['avg_overprediction_length'] - eval_metrics['avg_true_length']
-    #     print(f"Average Extra Tokens: {avg_extra:.1f}")
+    print("\nOverprediction Analysis:")
+    print(f"Total Questions: {eval_metrics['total_questions']}")
+    print(f"Overprediction Cases: {eval_metrics['overprediction_cases']}")
+    if eval_metrics['overprediction_cases'] > 0:
+        overpred_percentage = (eval_metrics['overprediction_cases'] / eval_metrics['total_questions']) * 100
+        print(f"Overprediction Percentage: {overpred_percentage:.1f}%")
+        print(f"Average Predicted Length: {eval_metrics['avg_overprediction_length']:.1f} tokens")
+        print(f"Average True Length: {eval_metrics['avg_true_length']:.1f} tokens")
+        avg_extra = eval_metrics['avg_overprediction_length'] - eval_metrics['avg_true_length']
+        print(f"Average Extra Tokens: {avg_extra:.1f}")
 
     if use_wandb:
         wandb.log({
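For reference, the restored print block assumes `eval_metrics` exposes token-level counts (`true_positives`, `false_positives`, `false_negatives`) plus overprediction statistics. Below is a minimal sketch of how such a dictionary could be assembled from per-question token lists, assuming multiset token overlap; the helper names `token_stats` and `aggregate_metrics` are hypothetical, not taken from this repository:

```python
from collections import Counter

def token_stats(pred_tokens, true_tokens):
    """Multiset overlap between predicted and gold answer tokens.

    Returns (true_positives, false_positives, false_negatives).
    """
    common = Counter(pred_tokens) & Counter(true_tokens)
    tp = sum(common.values())
    return tp, len(pred_tokens) - tp, len(true_tokens) - tp

def aggregate_metrics(predictions, references):
    """Aggregate token-level stats over paired lists of token sequences."""
    tp = fp = fn = 0
    overpred_cases = 0
    overpred_lengths, true_lengths = [], []
    for pred, true in zip(predictions, references):
        t, f_p, f_n = token_stats(pred, true)
        tp, fp, fn = tp + t, fp + f_p, fn + f_n
        if len(pred) > len(true):  # prediction longer than the gold answer
            overpred_cases += 1
            overpred_lengths.append(len(pred))
            true_lengths.append(len(true))
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {
        "precision": precision, "recall": recall, "f1": f1,
        "true_positives": tp, "false_positives": fp, "false_negatives": fn,
        "total_questions": len(predictions),
        "overprediction_cases": overpred_cases,
        # Averages only over overpredicted questions, matching the guarded prints above.
        "avg_overprediction_length": sum(overpred_lengths) / overpred_cases if overpred_cases else 0.0,
        "avg_true_length": sum(true_lengths) / overpred_cases if overpred_cases else 0.0,
    }
```

Counting overlap as a multiset intersection would give the usual SQuAD-style token F1, which is consistent with the precision/recall/F1 values the diff prints; raising `max_answer_length` to an effectively unbounded value is exactly the kind of change this overprediction analysis is meant to measure.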