1 parent c7fd336 commit 0cfcb5a
fine_tune.py
@@ -397,7 +397,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
             current_loss = loss.detach().item()  # it's an average, so batch size shouldn't matter
             if args.logging_dir is not None:
                 logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-                if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy":  # tracking d*lr value
+                if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():  # tracking d*lr value
                     logs["lr/d*lr"] = (
                         lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
                     )
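The change fixes a string comparison that could never succeed: args.optimizer_type is lowercased before the equality test, so comparing it against the mixed-case literal "Prodigy" is always False, and the d*lr metric was never logged when the Prodigy optimizer was selected. A minimal sketch of the failure mode; the optimizer_type value here is only illustrative, standing in for args.optimizer_type:

```python
# Sketch of the bug this commit fixes; `optimizer_type` stands in for
# args.optimizer_type, and its value is only illustrative.
optimizer_type = "Prodigy"

# Before the fix: the left side is lowercased to "prodigy", but the right
# side keeps its capital "P", so the comparison is False for every input.
broken = optimizer_type.lower() == "Prodigy"

# After the fix: lowercasing both sides makes the test case-insensitive,
# matching "Prodigy", "prodigy", "PRODIGY", and so on.
fixed = optimizer_type.lower() == "Prodigy".lower()

assert not broken and fixed
```

Comparing against the literal "prodigy" directly would be equivalent; the committed form mirrors the adjacent startswith("DAdapt".lower()) check on the same line.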