from multiprocessing import cpu_count
import threading
from time import sleep
from subprocess import Popen
from time import sleep
import torch, os,traceback,sys,warnings,shutil,numpy as np
import faiss
from webui_locale import I18nAuto
i18n = I18nAuto()
#check whether there is an NVIDIA GPU that can be used for training and to accelerate inference
ncpu=cpu_count()
ngpu=torch.cuda.device_count()
gpu_infos=[]
if((not torch.cuda.is_available()) or ngpu==0):if_gpu_ok=False
else:
if_gpu_ok = False
for i in range(ngpu):
gpu_name=torch.cuda.get_device_name(i)
if("16"in gpu_name or "MX"in gpu_name):continue
if("10"in gpu_name or "20"in gpu_name or "30"in gpu_name or "40"in gpu_name or "A50"in gpu_name.upper() or "70"in gpu_name or "80"in gpu_name or "90"in gpu_name or "M4"in gpu_name or "T4"in gpu_name or "TITAN"in gpu_name.upper()):#A10#A100#V100#A40#P40#M40#K80
if_gpu_ok=True# at least one usable NVIDIA GPU
gpu_infos.append("%s\t%s"%(i,gpu_name))
gpu_info="\n".join(gpu_infos)if if_gpu_ok==True and len(gpu_infos)>0 else "很遗憾您这没有能用的显卡来支持您训练"
gpus="-".join([i[0]for i in gpu_infos])
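# e.g. with two usable cards, gpu_infos holds entries like "0\tNVIDIA GeForce RTX 3090"
# (a hypothetical name); gpu_info joins them for display in the training tab, and gpus
# becomes the default "0-1" card list offered in the card-selection textboxes below.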
now_dir=os.getcwd()
sys.path.append(now_dir)
tmp=os.path.join(now_dir,"TEMP")
shutil.rmtree(tmp,ignore_errors=True)
os.makedirs(tmp,exist_ok=True)
os.makedirs(os.path.join(now_dir,"logs"),exist_ok=True)
os.makedirs(os.path.join(now_dir,"weights"),exist_ok=True)
os.environ["TEMP"]=tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
from scipy.io import wavfile
from fairseq import checkpoint_utils
import gradio as gr
import logging
from vc_infer_pipeline import VC
from config import is_half,device,python_cmd,listen_port,iscolab,noparallel,noautoopen
from infer_uvr5 import _audio_pre_
from my_utils import load_audio
from train.process_ckpt import show_info,change_info,merge,extract_small_model
# from trainset_preprocess_pipeline import PreProcess
logging.getLogger('numba').setLevel(logging.WARNING)
class ToolButton(gr.Button, gr.components.FormComponent):
"""Small button with single emoji as text, fits inside gradio forms"""
def __init__(self, **kwargs):
super().__init__(variant="tool", **kwargs)
def get_block_name(self):
return "button"
hubert_model=None
def load_hubert():
global hubert_model
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"],suffix="",)
hubert_model = models[0]
hubert_model = hubert_model.to(device)
if(is_half):hubert_model = hubert_model.half()
else:hubert_model = hubert_model.float()
hubert_model.eval()
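# Note: load_hubert() is not called at startup; vc_single() below loads the model lazily
# on the first conversion (see its hubert_model==None check), so the UI can come up
# without keeping HuBERT in memory.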
weight_root="weights"
weight_uvr5_root="uvr5_weights"
names=[]
for name in os.listdir(weight_root):
if name.endswith(".pth"): names.append(name)
uvr5_names=[]
for name in os.listdir(weight_uvr5_root):
if name.endswith(".pth"): uvr5_names.append(name.replace(".pth",""))
def vc_single(sid,input_audio,f0_up_key,f0_file,f0_method,file_index,file_big_npy,index_rate):#spk_item, input_audio0, vc_transform0,f0_file,f0method0
global tgt_sr,net_g,vc,hubert_model
if input_audio is None:return "You need to upload an audio", None
f0_up_key = int(f0_up_key)
try:
audio=load_audio(input_audio,16000)
times = [0, 0, 0]
if(hubert_model==None):load_hubert()
if_f0 = cpt.get("f0", 1)
audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
print("npy: ", times[0], "s, f0: ", times[1], "s, infer: ", times[2], "s", sep='')
return "Success", (tgt_sr, audio_opt)
except:
info=traceback.format_exc()
print(info)
return info,(None,None)
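# Illustrative sketch (not wired into the UI): converting one file from code, the same
# way but0.click does further down. The model name, index paths and output file are
# hypothetical placeholders.
def _example_vc_single_call():
    get_vc("my-voice.pth")  # hypothetical .pth placed in the weights/ folder
    msg, opt = vc_single(0, "input.wav", 0, None, "pm",
                         "logs/my-exp/added_IVF256_Flat_nprobe_6.index",
                         "logs/my-exp/total_fea.npy", 1.0)
    if msg == "Success":
        sr_out, audio_out = opt
        wavfile.write("converted.wav", sr_out, audio_out)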
def vc_multi(sid,dir_path,opt_root,paths,f0_up_key,f0_method,file_index,file_big_npy,index_rate):
try:
dir_path=dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")# guard against pasted paths with leading/trailing spaces, quotes or newlines
opt_root=opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
os.makedirs(opt_root, exist_ok=True)
try:
if(dir_path!=""):paths=[os.path.join(dir_path,name)for name in os.listdir(dir_path)]
else:paths=[path.name for path in paths]
except:
traceback.print_exc()
paths = [path.name for path in paths]
infos=[]
for path in paths:
info,opt=vc_single(sid,path,f0_up_key,None,f0_method,file_index,file_big_npy,index_rate)
if(info=="Success"):
try:
tgt_sr,audio_opt=opt
wavfile.write("%s/%s" % (opt_root, os.path.basename(path)), tgt_sr, audio_opt)
except:
info=traceback.format_exc()
infos.append("%s->%s"%(os.path.basename(path),info))
yield "\n".join(infos)
yield "\n".join(infos)
except:
yield traceback.format_exc()
def uvr(model_name,inp_root,save_root_vocal,paths,save_root_ins):
infos = []
try:
inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
save_root_vocal = save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
save_root_ins = save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
pre_fun = _audio_pre_(model_path=os.path.join(weight_uvr5_root,model_name+".pth"), device=device, is_half=is_half)
if (inp_root != ""):paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
else:paths = [path.name for path in paths]
for name in paths:
inp_path=os.path.join(inp_root,name)
try:
pre_fun._path_audio_(inp_path , save_root_ins,save_root_vocal)
infos.append("%s->Success"%(os.path.basename(inp_path)))
yield "\n".join(infos)
except:
infos.append("%s->%s" % (os.path.basename(inp_path),traceback.format_exc()))
yield "\n".join(infos)
except:
infos.append(traceback.format_exc())
yield "\n".join(infos)
finally:
try:
del pre_fun.model
del pre_fun
except:
traceback.print_exc()
print("clean_empty_cache")
if torch.cuda.is_available(): torch.cuda.empty_cache()
yield "\n".join(infos)
#only one voice can be loaded globally per tab
def get_vc(sid):
global n_spk,tgt_sr,net_g,vc,cpt
if(sid==[]):
global hubert_model
if (hubert_model != None): # because of polling, check whether sid just switched from a loaded model to no model
print("clean_empty_cache")
del net_g, n_spk, vc, hubert_model,tgt_sr#,cpt
hubert_model = net_g=n_spk=vc=hubert_model=tgt_sr=None
if torch.cuda.is_available(): torch.cuda.empty_cache()
###without the juggling below, GPU memory is not released cleanly
if_f0 = cpt.get("f0", 1)
if (if_f0 == 1):
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
else:
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
del net_g,cpt
if torch.cuda.is_available(): torch.cuda.empty_cache()
cpt=None
return {"visible": False, "__type__": "update"}
person = "%s/%s" % (weight_root, sid)
print("loading %s"%person)
cpt = torch.load(person, map_location="cpu")
tgt_sr = cpt["config"][-1]
cpt["config"][-3]=cpt["weight"]["emb_g.weight"].shape[0]#n_spk
if_f0=cpt.get("f0",1)
if(if_f0==1):
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
else:
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
del net_g.enc_q
print(net_g.load_state_dict(cpt["weight"], strict=False)) # without this line things do not get cleaned up properly; odd but required
net_g.eval().to(device)
if (is_half):net_g = net_g.half()
else:net_g = net_g.float()
vc = VC(tgt_sr, device, is_half)
n_spk=cpt["config"][-3]
return {"visible": True,"maximum": n_spk, "__type__": "update"}
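# get_vc is bound to sid0.change below: it loads the selected checkpoint from weights/,
# drops the unused enc_q encoder, builds the VC pipeline for the checkpoint's sample
# rate, and returns a gradio update that shows the speaker-id slider capped at n_spk.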
def change_choices():
names=[]
for name in os.listdir(weight_root):
if name.endswith(".pth"): names.append(name)
return {"choices": sorted(names), "__type__": "update"}
def clean():return {"value": "", "__type__": "update"}
def change_f0(if_f0_3,sr2):#np7, f0method8,pretrained_G14,pretrained_D15
if(if_f0_3==""):return {"visible": True, "__type__": "update"},{"visible": True, "__type__": "update"},"pretrained/f0G%s.pth"%sr2,"pretrained/f0D%s.pth"%sr2
return {"visible": False, "__type__": "update"}, {"visible": False, "__type__": "update"},"pretrained/G%s.pth"%sr2,"pretrained/D%s.pth"%sr2
sr_dict={
"32k":32000,
"40k":40000,
"48k":48000,
}
def if_done(done,p):
while 1:
if(p.poll()==None):sleep(0.5)
else:break
done[0]=True
def if_done_multi(done,ps):
while 1:
#poll()==None means the process has not finished yet
#keep waiting as long as any of the processes is still running
flag=1
for p in ps:
if(p.poll()==None):
flag = 0
sleep(0.5)
break
if(flag==1):break
done[0]=True
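# if_done / if_done_multi run in a background thread and flip done[0] once the spawned
# Popen child (or all children) have exited; the generators below poll that flag while
# re-reading a log file so gradio can stream progress into the output textbox.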
def preprocess_dataset(trainset_dir,exp_dir,sr,n_p=ncpu):
sr=sr_dict[sr]
os.makedirs("%s/logs/%s"%(now_dir,exp_dir),exist_ok=True)
f = open("%s/logs/%s/preprocess.log"%(now_dir,exp_dir), "w")
f.close()
cmd=python_cmd + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "%(trainset_dir,sr,n_p,now_dir,exp_dir)+str(noparallel)
print(cmd)
p = Popen(cmd, shell=True)#, stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir
###gradio quirk: Popen output can only be read back in one go after the process finishes (without gradio it streams line by line), so progress is written to a log file and polled on a timer instead
done=[False]
threading.Thread(target=if_done,args=(done,p,)).start()
while(1):
with open("%s/logs/%s/preprocess.log"%(now_dir,exp_dir),"r")as f:yield(f.read())
sleep(1)
if(done[0]==True):break
with open("%s/logs/%s/preprocess.log"%(now_dir,exp_dir), "r")as f:log = f.read()
print(log)
yield log
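# Illustrative refactor sketch (not used by the functions in this file): the
# poll-the-log-file loop that preprocess_dataset and extract_f0_feature both repeat,
# written once as a generator. log_path and done mirror the locals used above.
def _example_tail_log_until_done(log_path, done):
    while True:
        with open(log_path, "r") as f:
            yield f.read()
        sleep(1)
        if done[0]:
            break
    with open(log_path, "r") as f:
        yield f.read()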
#but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
def extract_f0_feature(gpus,n_p,f0method,if_f0,exp_dir):
gpus=gpus.split("-")
os.makedirs("%s/logs/%s"%(now_dir,exp_dir),exist_ok=True)
f = open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir), "w")
f.close()
if(if_f0==""):
cmd=python_cmd + " extract_f0_print.py %s/logs/%s %s %s"%(now_dir,exp_dir,n_p,f0method)
print(cmd)
p = Popen(cmd, shell=True,cwd=now_dir)#, stdin=PIPE, stdout=PIPE,stderr=PIPE
###gradio quirk: Popen output can only be read back in one go after the process finishes (without gradio it streams line by line), so progress is written to a log file and polled on a timer instead
done=[False]
threading.Thread(target=if_done,args=(done,p,)).start()
while(1):
with open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir),"r")as f:yield(f.read())
sleep(1)
if(done[0]==True):break
with open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir), "r")as f:log = f.read()
print(log)
yield log
####spawn a separate process for each part
'''
n_part=int(sys.argv[1])
i_part=int(sys.argv[2])
i_gpu=sys.argv[3]
exp_dir=sys.argv[4]
os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
'''
leng=len(gpus)
ps=[]
for idx,n_g in enumerate(gpus):
cmd=python_cmd + " extract_feature_print.py %s %s %s %s %s/logs/%s"%(device,leng,idx,n_g,now_dir,exp_dir)
print(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)#, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
ps.append(p)
###gradio quirk: Popen output can only be read back in one go after the process finishes (without gradio it streams line by line), so progress is written to a log file and polled on a timer instead
done = [False]
threading.Thread(target=if_done_multi, args=(done, ps,)).start()
while (1):
with open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir), "r")as f:yield (f.read())
sleep(1)
if (done[0] == True): break
with open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir), "r")as f:log = f.read()
print(log)
yield log
def change_sr2(sr2,if_f0_3):
if(if_f0_3==""):return "pretrained/f0G%s.pth"%sr2,"pretrained/f0D%s.pth"%sr2
else:return "pretrained/G%s.pth"%sr2,"pretrained/D%s.pth"%sr2
#but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
def click_train(exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17):
#build the filelist
exp_dir="%s/logs/%s"%(now_dir,exp_dir1)
os.makedirs(exp_dir,exist_ok=True)
gt_wavs_dir="%s/0_gt_wavs"%(exp_dir)
co256_dir="%s/3_feature256"%(exp_dir)
if(if_f0_3==""):
f0_dir = "%s/2a_f0" % (exp_dir)
f0nsf_dir="%s/2b-f0nsf"%(exp_dir)
names=set([name.split(".")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(".")[0]for name in os.listdir(co256_dir)])&set([name.split(".")[0]for name in os.listdir(f0_dir)])&set([name.split(".")[0]for name in os.listdir(f0nsf_dir)])
else:
names=set([name.split(".")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(".")[0]for name in os.listdir(co256_dir)])
opt=[]
for name in names:
if (if_f0_3 == ""):
opt.append("%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"%(gt_wavs_dir.replace("\\","\\\\"),name,co256_dir.replace("\\","\\\\"),name,f0_dir.replace("\\","\\\\"),name,f0nsf_dir.replace("\\","\\\\"),name,spk_id5))
else:
opt.append("%s/%s.wav|%s/%s.npy|%s"%(gt_wavs_dir.replace("\\","\\\\"),name,co256_dir.replace("\\","\\\\"),name,spk_id5))
if (if_f0_3 == ""):
opt.append("%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"%(now_dir,sr2,now_dir,now_dir,now_dir,spk_id5))
else:
opt.append("%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"%(now_dir,sr2,now_dir,spk_id5))
with open("%s/filelist.txt"%exp_dir,"w")as f:f.write("\n".join(opt))
print("write filelist done")
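# A written filelist line (with pitch guidance) looks roughly like, for a hypothetical
# experiment "mi-test" and clip "0_0":
#   .../logs/mi-test/0_gt_wavs/0_0.wav|.../3_feature256/0_0.npy|.../2a_f0/0_0.wav.npy|.../2b-f0nsf/0_0.wav.npy|0
# i.e. ground-truth wav | 256-dim HuBERT features | coarse f0 | nsf f0 | speaker id.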
#generate config: no longer needed
# cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
print("use gpus:",gpus16)
if gpus16:
cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
else:
cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
print(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)
p.wait()
return "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"
# but4.click(train_index, [exp_dir1], info3)
def train_index(exp_dir1):
exp_dir="%s/logs/%s"%(now_dir,exp_dir1)
os.makedirs(exp_dir,exist_ok=True)
feature_dir="%s/3_feature256"%(exp_dir)
if(os.path.exists(feature_dir)==False):return "请先进行特征提取!"
listdir_res=list(os.listdir(feature_dir))
if(len(listdir_res)==0):return "请先进行特征提取!"
npys = []
for name in sorted(listdir_res):
phone = np.load("%s/%s" % (feature_dir, name))
npys.append(phone)
big_npy = np.concatenate(npys, 0)
np.save("%s/total_fea.npy"%exp_dir, big_npy)
n_ivf = big_npy.shape[0] // 39
infos=[]
infos.append("%s,%s"%(big_npy.shape,n_ivf))
yield "\n".join(infos)
index = faiss.index_factory(256, "IVF%s,Flat"%n_ivf)
infos.append("training")
yield "\n".join(infos)
index_ivf = faiss.extract_index_ivf(index) #
index_ivf.nprobe = int(np.power(n_ivf,0.3))
index.train(big_npy)
faiss.write_index(index, '%s/trained_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))
infos.append("adding")
yield "\n".join(infos)
index.add(big_npy)
faiss.write_index(index, '%s/added_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))
infos.append("成功构建索引, added_IVF%s_Flat_nprobe_%s.index"%(n_ivf,index_ivf.nprobe))
yield "\n".join(infos)
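# Illustrative sketch (the real lookup lives in vc_infer_pipeline.VC.pipeline): roughly
# how the index and total_fea.npy built above are consumed at inference time. feats is
# assumed to be an (N, 256) float32 array of HuBERT frames and index_rate lies in [0, 1].
def _example_index_lookup(feats, index_path, total_fea_path, index_rate):
    index = faiss.read_index(index_path)
    big_npy = np.load(total_fea_path)
    _, ix = index.search(feats, 1)  # nearest stored training frame per input frame
    retrieved = big_npy[ix.squeeze(1)]
    # blend retrieved training features with the live features
    return index_rate * retrieved + (1 - index_rate) * feats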
#but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
def train1key(exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17):
infos=[]
def get_info_str(strr):
infos.append(strr)
return "\n".join(infos)
os.makedirs("%s/logs/%s"%(now_dir,exp_dir1),exist_ok=True)
#########step1: process the data
open("%s/logs/%s/preprocess.log"%(now_dir,exp_dir1), "w").close()
cmd=python_cmd + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "%(trainset_dir4,sr_dict[sr2],ncpu,now_dir,exp_dir1)+str(noparallel)
yield get_info_str("step1:正在处理数据")
yield get_info_str(cmd)
p = Popen(cmd, shell=True)
p.wait()
with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir1), "r")as f: print(f.read())
#########step2a: extract pitch
open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir1), "w")
if(if_f0_3==""):
yield get_info_str("step2a:正在提取音高")
cmd=python_cmd + " extract_f0_print.py %s/logs/%s %s %s"%(now_dir,exp_dir1,np7,f0method8)
yield get_info_str(cmd)
p = Popen(cmd, shell=True,cwd=now_dir)
p.wait()
with open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir1), "r")as f:print(f.read())
else:yield get_info_str("step2a:无需提取音高")
#######step2b: extract features
yield get_info_str("step2b:正在提取特征")
gpus=gpus16.split("-")
leng=len(gpus)
ps=[]
for idx,n_g in enumerate(gpus):
cmd=python_cmd + " extract_feature_print.py %s %s %s %s %s/logs/%s"%(device,leng,idx,n_g,now_dir,exp_dir1)
yield get_info_str(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)#, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
ps.append(p)
for p in ps:p.wait()
with open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir1), "r")as f:print(f.read())
#######step3a: train the model
yield get_info_str("step3a:正在训练模型")
#build the filelist
exp_dir="%s/logs/%s"%(now_dir,exp_dir1)
gt_wavs_dir="%s/0_gt_wavs"%(exp_dir)
co256_dir="%s/3_feature256"%(exp_dir)
if(if_f0_3==""):
f0_dir = "%s/2a_f0" % (exp_dir)
f0nsf_dir="%s/2b-f0nsf"%(exp_dir)
names=set([name.split(".")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(".")[0]for name in os.listdir(co256_dir)])&set([name.split(".")[0]for name in os.listdir(f0_dir)])&set([name.split(".")[0]for name in os.listdir(f0nsf_dir)])
else:
names=set([name.split(".")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(".")[0]for name in os.listdir(co256_dir)])
opt=[]
for name in names:
if (if_f0_3 == ""):
opt.append("%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"%(gt_wavs_dir.replace("\\","\\\\"),name,co256_dir.replace("\\","\\\\"),name,f0_dir.replace("\\","\\\\"),name,f0nsf_dir.replace("\\","\\\\"),name,spk_id5))
else:
opt.append("%s/%s.wav|%s/%s.npy|%s"%(gt_wavs_dir.replace("\\","\\\\"),name,co256_dir.replace("\\","\\\\"),name,spk_id5))
if (if_f0_3 == ""):
opt.append("%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"%(now_dir,sr2,now_dir,now_dir,now_dir,spk_id5))
else:
opt.append("%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"%(now_dir,sr2,now_dir,spk_id5))
with open("%s/filelist.txt"%exp_dir,"w")as f:f.write("\n".join(opt))
yield get_info_str("write filelist done")
if gpus16:
cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
else:
cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
yield get_info_str(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)
p.wait()
yield get_info_str("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log")
#######step3b: train the index
feature_dir="%s/3_feature256"%(exp_dir)
npys = []
listdir_res=list(os.listdir(feature_dir))
for name in sorted(listdir_res):
phone = np.load("%s/%s" % (feature_dir, name))
npys.append(phone)
big_npy = np.concatenate(npys, 0)
np.save("%s/total_fea.npy"%exp_dir, big_npy)
n_ivf = big_npy.shape[0] // 39
yield get_info_str("%s,%s"%(big_npy.shape,n_ivf))
index = faiss.index_factory(256, "IVF%s,Flat"%n_ivf)
yield get_info_str("training index")
index_ivf = faiss.extract_index_ivf(index) #
index_ivf.nprobe = int(np.power(n_ivf,0.3))
index.train(big_npy)
faiss.write_index(index, '%s/trained_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))
yield get_info_str("adding index")
index.add(big_npy)
faiss.write_index(index, '%s/added_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))
yield get_info_str("成功构建索引, added_IVF%s_Flat_nprobe_%s.index"%(n_ivf,index_ivf.nprobe))
yield get_info_str("全流程结束!")
# ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
def change_info_(ckpt_path):
if(os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path),"train.log"))==False):return {"__type__": "update"},{"__type__": "update"}
try:
with open(ckpt_path.replace(os.path.basename(ckpt_path),"train.log"),"r")as f:
info=eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
sr,f0=info["sample_rate"],info["if_f0"]
return sr,str(f0)
except:
traceback.print_exc()
return {"__type__": "update"}, {"__type__": "update"}
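# change_info_ is bound to ckpt_path2.change below: it reads the first line of the
# train.log that sits next to the big checkpoint and pre-fills the target sample-rate
# and f0 radios in the model-extraction panel.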
with gr.Blocks() as app:
gr.Markdown(value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>使用需遵守的协议-LICENSE.txt</b>."))
with gr.Tabs():
with gr.TabItem(i18n("模型推理")):
with gr.Row():
sid0 = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names))
refresh_button = gr.Button(i18n("刷新音色列表"), variant="primary")
refresh_button.click(
fn=change_choices,
inputs=[],
outputs=[sid0]
)
clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary")
spk_item = gr.Slider(minimum=0, maximum=2333, step=1, label=i18n("请选择说话人id"), value=0, visible=False, interactive=True)
clean_button.click(
fn=clean,
inputs=[],
outputs=[sid0]
)
sid0.change(
fn=get_vc,
inputs=[sid0],
outputs=[spk_item],
)
with gr.Group():
gr.Markdown(value=i18n("男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. "))
with gr.Row():
with gr.Column():
vc_transform0 = gr.Number(label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0)
input_audio0 = gr.Textbox(label=i18n("输入待处理音频文件路径(默认是正确格式示例)"),value="E:\\codes\\py39\\vits_vc_gpu_train\\todo-songs\\冬之花clip1.wav")
f0method0=gr.Radio(label=i18n("选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比"), choices=["pm","harvest"],value="pm", interactive=True)
with gr.Column():
file_index1 = gr.Textbox(label=i18n("特征检索库文件路径"),value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\added_IVF677_Flat_nprobe_7.index", interactive=True)
file_big_npy1 = gr.Textbox(label=i18n("特征文件路径"),value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", interactive=True)
index_rate1 = gr.Slider(minimum=0, maximum=1,label='检索特征占比', value=1,interactive=True)
f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"))
but0=gr.Button(i18n("转换"), variant="primary")
with gr.Column():
vc_output1 = gr.Textbox(label=i18n("输出信息"))
vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)"))
but0.click(vc_single, [spk_item, input_audio0, vc_transform0,f0_file,f0method0,file_index1,file_big_npy1,index_rate1], [vc_output1, vc_output2])
with gr.Group():
gr.Markdown(value=i18n("批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. "))
with gr.Row():
with gr.Column():
vc_transform1 = gr.Number(label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0)
opt_input = gr.Textbox(label=i18n("指定输出文件夹"),value="opt")
f0method1=gr.Radio(label=i18n("选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比"), choices=["pm","harvest"],value="pm", interactive=True)
with gr.Column():
file_index2 = gr.Textbox(label=i18n("特征检索库文件路径"),value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\added_IVF677_Flat_nprobe_7.index", interactive=True)
file_big_npy2 = gr.Textbox(label=i18n("特征文件路径"),value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", interactive=True)
index_rate2 = gr.Slider(minimum=0, maximum=1,label=i18n("检索特征占比"), value=1,interactive=True)
with gr.Column():
dir_input = gr.Textbox(label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),value="E:\\codes\\py39\\vits_vc_gpu_train\\todo-songs")
inputs = gr.File(file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹"))
but1=gr.Button(i18n("转换"), variant="primary")
vc_output3 = gr.Textbox(label=i18n("输出信息"))
but1.click(vc_multi, [spk_item, dir_input,opt_input,inputs, vc_transform1,f0method1,file_index2,file_big_npy2,index_rate2], [vc_output3])
with gr.TabItem(i18n("伴奏人声分离")):
with gr.Group():
gr.Markdown(value=i18n("人声伴奏分离批量处理, 使用UVR5模型. <br>不带和声用HP2, 带和声且提取的人声不需要和声用HP5<br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)"))
with gr.Row():
with gr.Column():
dir_wav_input = gr.Textbox(label=i18n("输入待处理音频文件夹路径"),value="E:\\codes\\py39\\vits_vc_gpu_train\\todo-songs")
wav_inputs = gr.File(file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹"))
with gr.Column():
model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names)
opt_vocal_root = gr.Textbox(label=i18n("指定输出人声文件夹"),value="opt")
opt_ins_root = gr.Textbox(label=i18n("指定输出乐器文件夹"),value="opt")
but2=gr.Button(i18n("转换"), variant="primary")
vc_output4 = gr.Textbox(label=i18n("输出信息"))
but2.click(uvr, [model_choose, dir_wav_input,opt_vocal_root,wav_inputs,opt_ins_root], [vc_output4])
with gr.TabItem(i18n("训练")):
gr.Markdown(value=i18n("step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. "))
with gr.Row():
exp_dir1 = gr.Textbox(label=i18n("输入实验名"),value="mi-test")
sr2 = gr.Radio(label=i18n("目标采样率"), choices=["32k","40k","48k"],value="40k", interactive=True)
if_f0_3 = gr.Radio(label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"), choices=["",""],value="", interactive=True)
with gr.Group():# single speaker only for now, support for up to 4 speakers planned # data processing
gr.Markdown(value=i18n("step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. "))
with gr.Row():
trainset_dir4 = gr.Textbox(label=i18n("输入训练文件夹路径"),value="E:\\语音音频+标注\\米津玄师\\src")
spk_id5 = gr.Slider(minimum=0, maximum=4, step=1, label=i18n("请指定说话人id"), value=0,interactive=True)
but1=gr.Button(i18n("处理数据"), variant="primary")
info1=gr.Textbox(label=i18n("输出信息"),value="")
but1.click(preprocess_dataset,[trainset_dir4,exp_dir1,sr2],[info1])
with gr.Group():
gr.Markdown(value=i18n("step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)"))
with gr.Row():
with gr.Column():
gpus6 = gr.Textbox(label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),value=gpus,interactive=True)
gpu_info9 = gr.Textbox(label=i18n("显卡信息"),value=gpu_info)
with gr.Column():
np7 = gr.Slider(minimum=0, maximum=ncpu, step=1, label=i18n("提取音高使用的CPU进程数"), value=ncpu,interactive=True)
f0method8 = gr.Radio(label=i18n("选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"), choices=["pm", "harvest","dio"], value="harvest", interactive=True)
but2=gr.Button(i18n("特征提取"), variant="primary")
info2=gr.Textbox(label=i18n("输出信息"),value="",max_lines=8)
but2.click(extract_f0_feature,[gpus6,np7,f0method8,if_f0_3,exp_dir1],[info2])
with gr.Group():
gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
with gr.Row():
save_epoch10 = gr.Slider(minimum=0, maximum=50, step=1, label=i18n("保存频率save_every_epoch"), value=5,interactive=True)
total_epoch11 = gr.Slider(minimum=0, maximum=1000, step=1, label=i18n("总训练轮数total_epoch"), value=20,interactive=True)
batch_size12 = gr.Slider(minimum=0, maximum=32, step=1, label='每张显卡的batch_size', value=4,interactive=True)
if_save_latest13 = gr.Radio(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), choices=["", ""], value="", interactive=True)
if_cache_gpu17 = gr.Radio(label=i18n("是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速"), choices=["", ""], value="", interactive=True)
with gr.Row():
pretrained_G14 = gr.Textbox(label=i18n("加载预训练底模G路径"), value="pretrained/f0G40k.pth",interactive=True)
pretrained_D15 = gr.Textbox(label=i18n("加载预训练底模D路径"), value="pretrained/f0D40k.pth",interactive=True)
sr2.change(change_sr2, [sr2,if_f0_3], [pretrained_G14,pretrained_D15])
if_f0_3.change(change_f0, [if_f0_3, sr2], [np7, f0method8, pretrained_G14, pretrained_D15])
gpus16 = gr.Textbox(label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"), value=gpus,interactive=True)
but3 = gr.Button(i18n("训练模型"), variant="primary")
but4 = gr.Button(i18n("训练特征索引"), variant="primary")
but5 = gr.Button(i18n("一键训练"), variant="primary")
info3 = gr.Textbox(label=i18n("输出信息"), value="",max_lines=10)
but3.click(click_train,[exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17],info3)
but4.click(train_index,[exp_dir1],info3)
but5.click(train1key,[exp_dir1,sr2,if_f0_3,trainset_dir4,spk_id5,gpus6,np7,f0method8,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17],info3)
with gr.TabItem(i18n("ckpt处理")):
with gr.Group():
gr.Markdown(value=i18n("模型融合, 可用于测试音色融合"))
with gr.Row():
ckpt_a = gr.Textbox(label=i18n("A模型路径"), value="", interactive=True)
ckpt_b = gr.Textbox(label=i18n("B模型路径"), value="", interactive=True)
alpha_a = gr.Slider(minimum=0, maximum=1, label=i18n("A模型权重"), value=0.5, interactive=True)
with gr.Row():
sr_ = gr.Radio(label=i18n("目标采样率"), choices=["32k","40k","48k"],value="40k", interactive=True)
if_f0_ = gr.Radio(label=i18n("模型是否带音高指导"), choices=["",""],value="", interactive=True)
info__ = gr.Textbox(label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True)
name_to_save0=gr.Textbox(label=i18n("保存的模型名不带后缀"), value="", max_lines=1, interactive=True)
with gr.Row():
but6 = gr.Button(i18n("融合"), variant="primary")
info4 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
but6.click(merge, [ckpt_a,ckpt_b,alpha_a,sr_,if_f0_,info__,name_to_save0], info4)#def merge(path1,path2,alpha1,sr,f0,info):
with gr.Group():
gr.Markdown(value=i18n("修改模型信息(仅支持weights文件夹下提取的小模型文件)"))
with gr.Row():
ckpt_path0 = gr.Textbox(label=i18n("模型路径"), value="", interactive=True)
info_=gr.Textbox(label=i18n("要改的模型信息"), value="", max_lines=8, interactive=True)
name_to_save1=gr.Textbox(label=i18n("保存的文件名, 默认空为和源文件同名"), value="", max_lines=8, interactive=True)
with gr.Row():
but7 = gr.Button(i18n("修改"), variant="primary")
info5 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
but7.click(change_info, [ckpt_path0,info_,name_to_save1], info5)
with gr.Group():
gr.Markdown(value=i18n("查看模型信息(仅支持weights文件夹下提取的小模型文件)"))
with gr.Row():
ckpt_path1 = gr.Textbox(label=i18n("模型路径"), value="", interactive=True)
but8 = gr.Button(i18n("查看"), variant="primary")
info6 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
but8.click(show_info, [ckpt_path1], info6)
with gr.Group():
gr.Markdown(value=i18n("模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况"))
with gr.Row():
ckpt_path2 = gr.Textbox(label=i18n("模型路径"), value="E:\\codes\\py39\\logs\\mi-test_f0_48k\\G_23333.pth", interactive=True)
save_name = gr.Textbox(label=i18n("保存名"), value="", interactive=True)
sr__ = gr.Radio(label=i18n("目标采样率"), choices=["32k","40k","48k"],value="40k", interactive=True)
if_f0__ = gr.Radio(label=i18n("模型是否带音高指导,1是0否"), choices=["1","0"],value="1", interactive=True)
info___ = gr.Textbox(label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True)
but9 = gr.Button(i18n("提取"), variant="primary")
info7 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
but9.click(extract_small_model, [ckpt_path2,save_name,sr__,if_f0__,info___], info7)
with gr.TabItem(i18n("招募音高曲线前端编辑器")):
gr.Markdown(value=i18n("加开发群联系我xxxxx"))
with gr.TabItem(i18n("点击查看交流、问题反馈群号")):
gr.Markdown(value=i18n("xxxxx"))
if iscolab:
app.queue(concurrency_count=511, max_size=1022).launch(share=True)
else:
app.queue(concurrency_count=511, max_size=1022).launch(server_name="0.0.0.0",inbrowser=not noautoopen,server_port=listen_port,quiet=True)
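# When iscolab is set, a public gradio share link is created; otherwise the app binds to
# 0.0.0.0 on listen_port and opens a local browser tab unless noautoopen is set (all of
# these flags come from the config module imported at the top).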