commit 8dc195bea7
parent f27a991794

    fix

infer-web.py: 11 lines changed
@@ -305,7 +305,11 @@ def click_train(exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_si
     print("write filelist done")
     # generate config  # no need to generate config
     # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
-    cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3=="是"else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13=="是"else 0,1 if if_cache_gpu17=="是"else 0)
+    print("use gpus:",gpus16)
+    if gpus16:
+        cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3=="是"else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13=="是"else 0,1 if if_cache_gpu17=="是"else 0)
+    else:
+        cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3=="是"else 0,batch_size12,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13=="是"else 0,1 if if_cache_gpu17=="是"else 0)
     print(cmd)
     p = Popen(cmd, shell=True, cwd=now_dir)
     p.wait()
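The hunk above only adds the "-g" option when gpus16 is a non-empty string, so an empty GPU field falls through to a CPU-capable command line. A minimal standalone sketch of that idea follows; build_train_cmd and its parameters are illustrative names, not part of infer-web.py.

    # Sketch: append "-g" only when a GPU list such as "0" or "0-1" is given;
    # omitting it lets the launched training script decide to run on CPU.
    def build_train_cmd(python_cmd, exp_dir, sr, gpus=""):
        cmd = "%s train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s" % (python_cmd, exp_dir, sr)
        if gpus:
            cmd += " -g %s" % gpus
        return cmd

    print(build_train_cmd("python", "mi-test", "40k", gpus="0"))
    print(build_train_cmd("python", "mi-test", "40k"))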
@@ -398,7 +402,10 @@ def train1key(exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0meth
         opt.append("%s/%s.wav|%s/%s.npy|%s"%(gt_wavs_dir.replace("\\","\\\\"),name,co256_dir.replace("\\","\\\\"),name,spk_id5))
     with open("%s/filelist.txt"%exp_dir,"w")as f:f.write("\n".join(opt))
     yield get_info_str("write filelist done")
-    cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3=="是"else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13=="是"else 0,1 if if_cache_gpu17=="是"else 0)
+    if gpus16:
+        cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3=="是"else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13=="是"else 0,1 if if_cache_gpu17=="是"else 0)
+    else:
+        cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3=="是"else 0,batch_size12,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13=="是"else 0,1 if if_cache_gpu17=="是"else 0)
     yield get_info_str(cmd)
     p = Popen(cmd, shell=True, cwd=now_dir)
     p.wait()
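Both code paths end the same way: the assembled command is launched with Popen and the function blocks until training exits. A self-contained sketch of that launch pattern is below; the command string and the assumption that "python" is on PATH are placeholders, not repo code.

    # Sketch: launch a shell command from the project root and wait for it.
    from subprocess import Popen

    now_dir = "."
    cmd = "python -c \"print('training placeholder')\""
    p = Popen(cmd, shell=True, cwd=now_dir)
    p.wait()
    print("exit code:", p.returncode)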
@@ -34,9 +34,6 @@ global_step = 0


 def main():
-    """Assume Single Node Multi GPUs Training Only"""
-    assert torch.cuda.is_available(), "CPU training is not allowed."
-
     # n_gpus = torch.cuda.device_count()
     os.environ["MASTER_ADDR"] = "localhost"
     os.environ["MASTER_PORT"] = "5555"
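With the CUDA assert removed, main() can be entered on machines without a GPU, which means the process count can no longer come from the GPU count alone. One common way to handle that, shown purely as a sketch (the single-worker CPU fallback is an assumption, not something this commit adds), is:

    # Sketch: pick a world size that degrades to one CPU worker when no GPU exists.
    import os
    import torch

    n_gpus = torch.cuda.device_count()
    if n_gpus < 1:
        n_gpus = 1  # run a single CPU worker instead of refusing to start

    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "5555"
    print("world size:", n_gpus)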
@@ -65,7 +62,7 @@ def run(rank, n_gpus, hps):
         backend="gloo", init_method="env://", world_size=n_gpus, rank=rank
     )
     torch.manual_seed(hps.train.seed)
-    torch.cuda.set_device(rank)
+    if torch.cuda.is_available(): torch.cuda.set_device(rank)

     if (hps.if_f0 == 1):train_dataset = TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data)
     else:train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
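The process group already uses the gloo backend, which handles both CPU and GPU tensors, so only the CUDA-specific set_device call needs a guard. A small sketch of the same per-rank device selection, outside the repo code:

    # Sketch: choose a per-rank device that falls back to CPU when CUDA is absent.
    import torch

    rank = 0  # illustrative; in the script this comes from the spawned process
    if torch.cuda.is_available():
        torch.cuda.set_device(rank)
        device = torch.device("cuda", rank)
    else:
        device = torch.device("cpu")
    print(device)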
@@ -92,9 +89,13 @@ def run(rank, n_gpus, hps):
         persistent_workers=True,
         prefetch_factor=8,
     )
-    if(hps.if_f0==1):net_g = SynthesizerTrnMs256NSFsid(hps.data.filter_length // 2 + 1,hps.train.segment_size // hps.data.hop_length,**hps.model,is_half=hps.train.fp16_run,sr=hps.sample_rate).cuda(rank)
-    else:net_g = SynthesizerTrnMs256NSFsid_nono(hps.data.filter_length // 2 + 1,hps.train.segment_size // hps.data.hop_length,**hps.model,is_half=hps.train.fp16_run).cuda(rank)
-    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
+    if(hps.if_f0==1):
+        net_g = SynthesizerTrnMs256NSFsid(hps.data.filter_length // 2 + 1,hps.train.segment_size // hps.data.hop_length,**hps.model,is_half=hps.train.fp16_run,sr=hps.sample_rate)
+    else:
+        net_g = SynthesizerTrnMs256NSFsid_nono(hps.data.filter_length // 2 + 1,hps.train.segment_size // hps.data.hop_length,**hps.model,is_half=hps.train.fp16_run)
+    if torch.cuda.is_available(): net_g = net_g.cuda(rank)
+    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm)
+    if torch.cuda.is_available(): net_d = net_d.cuda(rank)
     optim_g = torch.optim.AdamW(
         net_g.parameters(),
         hps.train.learning_rate,
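The pattern in this hunk is: construct the model on CPU, then move it to the rank's GPU only if one exists. A toy version with a stand-in module (nn.Linear replaces the real generator here):

    # Sketch of guarded model placement; nn.Linear stands in for SynthesizerTrnMs256NSFsid.
    import torch
    from torch import nn

    rank = 0
    net_g = nn.Linear(16, 16)      # built on CPU first
    if torch.cuda.is_available():
        net_g = net_g.cuda(rank)   # moved to the rank's GPU only when present
    print(next(net_g.parameters()).device)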
@@ -109,8 +110,12 @@ def run(rank, n_gpus, hps):
     )
     # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
     # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
-    net_g = DDP(net_g, device_ids=[rank])
-    net_d = DDP(net_d, device_ids=[rank])
+    if torch.cuda.is_available():
+        net_g = DDP(net_g, device_ids=[rank])
+        net_d = DDP(net_d, device_ids=[rank])
+    else:
+        net_g = DDP(net_g)
+        net_d = DDP(net_d)

     try:  # resume automatically if a checkpoint can be loaded
         _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)  # loading D usually works fine
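DistributedDataParallel takes device_ids only when each process owns a GPU; on CPU it must be constructed without them. A minimal single-process sketch (gloo backend, world size 1, names and port chosen only for the example) of the two constructions:

    # Sketch: wrap a module in DDP with or without device_ids depending on CUDA.
    import os
    import torch
    import torch.distributed as dist
    from torch import nn
    from torch.nn.parallel import DistributedDataParallel as DDP

    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "5555")
    dist.init_process_group(backend="gloo", init_method="env://", world_size=1, rank=0)

    net = nn.Linear(8, 8)
    if torch.cuda.is_available():
        net = DDP(net.cuda(0), device_ids=[0])  # GPU path: one device per process
    else:
        net = DDP(net)                          # CPU path: no device_ids
    print(type(net).__name__)
    dist.destroy_process_group()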
@@ -190,11 +195,12 @@ def train_and_evaluate(
     for batch_idx, info in enumerate(train_loader):
         if (hps.if_f0 == 1):phone,phone_lengths,pitch,pitchf,spec,spec_lengths,wave,wave_lengths,sid=info
         else:phone,phone_lengths,spec,spec_lengths,wave,wave_lengths,sid=info
-        phone, phone_lengths = phone.cuda(rank, non_blocking=True),phone_lengths.cuda(rank, non_blocking=True )
-        if (hps.if_f0 == 1):pitch,pitchf = pitch.cuda(rank, non_blocking=True),pitchf.cuda(rank, non_blocking=True)
-        sid = sid.cuda(rank, non_blocking=True)
-        spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
-        wave, wave_lengths = wave.cuda(rank, non_blocking=True), wave_lengths.cuda(rank, non_blocking=True)
+        if torch.cuda.is_available():
+            phone, phone_lengths = phone.cuda(rank, non_blocking=True), phone_lengths.cuda(rank, non_blocking=True )
+            if (hps.if_f0 == 1):pitch,pitchf = pitch.cuda(rank, non_blocking=True),pitchf.cuda(rank, non_blocking=True)
+            sid = sid.cuda(rank, non_blocking=True)
+            spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
+            wave, wave_lengths = wave.cuda(rank, non_blocking=True), wave_lengths.cuda(rank, non_blocking=True)
         if(hps.if_cache_data_in_gpu==True):
             if (hps.if_f0 == 1):cache.append((batch_idx, (phone,phone_lengths,pitch,pitchf,spec,spec_lengths,wave,wave_lengths ,sid)))
             else:cache.append((batch_idx, (phone,phone_lengths,spec,spec_lengths,wave,wave_lengths ,sid)))
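The training loop applies the same guard to every tensor in the batch. An equivalent, slightly more compact variant (not what the commit uses) computes a device once and calls .to() unconditionally, since .to("cpu") is a no-op for tensors already on CPU:

    # Sketch: device-agnostic batch transfer without per-tensor guards.
    import torch

    rank = 0
    device = torch.device("cuda", rank) if torch.cuda.is_available() else torch.device("cpu")

    phone = torch.zeros(2, 10, 256)
    phone_lengths = torch.tensor([10, 10])
    phone = phone.to(device, non_blocking=True)
    phone_lengths = phone_lengths.to(device, non_blocking=True)
    print(phone.device, phone_lengths.device)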