Skip to content

Commit a1f2dc4

Browse files
authored
Fix load_models function in inference.py
There is a bug in the `load_models` function where, when `f0_condition=False`, it always loads the default model instead of the provided one.
1 parent 9772859 commit a1f2dc4

File tree

1 file changed

+7
-4
lines changed

1 file changed

+7
-4
lines changed

inference.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,10 +38,13 @@ def load_models(args):
3838
global fp16
3939
fp16 = args.fp16
4040
if not args.f0_condition:
41-
dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
42-
"DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
43-
"config_dit_mel_seed_uvit_whisper_small_wavenet.yml")
44-
f0_fn = None
41+
if args.checkpoint is None:
42+
dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
43+
"DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
44+
"config_dit_mel_seed_uvit_whisper_small_wavenet.yml")
45+
else:
46+
dit_checkpoint_path = args.checkpoint
47+
dit_config_path = args.config
4548
else:
4649
if args.checkpoint is None:
4750
dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",

0 commit comments

Comments
 (0)