
Format code (#275)

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
github-actions[bot] 2023-05-14 07:52:36 +00:00 committed by GitHub
parent 32437314b8
commit 6a3eaef090
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 295 additions and 158 deletions
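The diff below is authored by github-actions[bot] and only reflows code; nothing is renamed and no behavior changes. Judging from the 88-column wrapping, double quotes, and trailing commas in the hunks, the pass was most likely an automated formatter run. A minimal sketch of such a pass (the choice of black and the exact invocation are assumptions, not stated anywhere in the commit):

# Hypothetical reproduction of a "Format code" commit (assumes black is installed
# and is the formatter the CI bot runs; the project may pin a different tool).
import subprocess

# Reformat all tracked Python sources in place; CI then commits the result.
subprocess.run(["black", "."], check=True)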

View File

@@ -1,4 +1,5 @@
 import os, traceback, sys, parselmouth
 now_dir = os.getcwd()
 sys.path.append(now_dir)
 from my_utils import load_audio

View File

@@ -18,9 +18,12 @@ from fairseq import checkpoint_utils
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-if torch.cuda.is_available():device="cuda"
-elif torch.backends.mps.is_available():device="mps"
-else:device="cpu"
+if torch.cuda.is_available():
+    device = "cuda"
+elif torch.backends.mps.is_available():
+    device = "mps"
+else:
+    device = "cpu"
 f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
@@ -36,7 +39,9 @@ model_path = "hubert_base.pt"
 printt(exp_dir)
 wavPath = "%s/1_16k_wavs" % exp_dir
-outPath = "%s/3_feature256" % exp_dir if version=="v1"else "%s/3_feature768" % exp_dir
+outPath = (
+    "%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir
+)
 os.makedirs(outPath, exist_ok=True)
@@ -94,7 +99,9 @@ else:
         }
         with torch.no_grad():
             logits = model.extract_features(**inputs)
-            feats = model.final_proj(logits[0])if version=="v1"else logits[0]
+            feats = (
+                model.final_proj(logits[0]) if version == "v1" else logits[0]
+            )
             feats = feats.squeeze(0).float().cpu().numpy()
         if np.isnan(feats).sum() == 0:
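Both hunks in this file wrap the same v1/v2 switch: v1 models consume the 256-dimensional final_proj projection of the HuBERT output (written to 3_feature256), while v2 keeps the raw 768-dimensional features (written to 3_feature768). A minimal restatement of that branch (illustrative only; model and logits stand in for the objects used in the script):

# Illustrative sketch, not code from the commit: v1 projects the HuBERT output
# down to 256 dims via final_proj, v2 passes the 768-dim features through as-is.
def select_feats(model, logits, version):
    return model.final_proj(logits[0]) if version == "v1" else logits[0]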

View File

@@ -1,4 +1,5 @@
 import torch, os, traceback, sys, warnings, shutil, numpy as np
 os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
 from multiprocessing import cpu_count
 import threading
@@ -70,7 +71,12 @@ else:
     gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
     default_batch_size = 1
 gpus = "-".join([i[0] for i in gpu_infos])
-from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono,SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono
+from infer_pack.models import (
+    SynthesizerTrnMs256NSFsid,
+    SynthesizerTrnMs256NSFsid_nono,
+    SynthesizerTrnMs768NSFsid,
+    SynthesizerTrnMs768NSFsid_nono,
+)
 from scipy.io import wavfile
 from fairseq import checkpoint_utils
 import gradio as gr
@@ -144,7 +150,7 @@ def vc_single(
     index_rate,
     filter_radius,
     resample_sr,
-    rms_mix_rate
+    rms_mix_rate,
 ):  # spk_item, input_audio0, vc_transform0,f0_file,f0method0
     global tgt_sr, net_g, vc, hubert_model, version
     if input_audio_path is None:
@@ -153,20 +159,24 @@ def vc_single(
     try:
         audio = load_audio(input_audio_path, 16000)
         audio_max = np.abs(audio).max() / 0.95
-        if(audio_max>1):
+        if audio_max > 1:
             audio /= audio_max
         times = [0, 0, 0]
         if hubert_model == None:
             load_hubert()
         if_f0 = cpt.get("f0", 1)
-        file_index = (
-            file_index.strip(" ")
-            .strip('"')
-            .strip("\n")
-            .strip('"')
-            .strip(" ")
-            .replace("trained", "added")
-        )if file_index!=""else file_index2  # 防止小白写错,自动帮他替换掉
+        file_index = (
+            (
+                file_index.strip(" ")
+                .strip('"')
+                .strip("\n")
+                .strip('"')
+                .strip(" ")
+                .replace("trained", "added")
+            )
+            if file_index != ""
+            else file_index2
+        )  # 防止小白写错,自动帮他替换掉
         # file_big_npy = (
         #     file_big_npy.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
         # )
@@ -190,10 +200,19 @@ def vc_single(
             version,
             f0_file=f0_file,
         )
-        if(resample_sr>=16000 and tgt_sr!=resample_sr):
+        if resample_sr >= 16000 and tgt_sr != resample_sr:
             tgt_sr = resample_sr
-        index_info="Using index:%s."%file_index if os.path.exists(file_index)else"Index not used."
-        return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss"%(index_info,times[0],times[1],times[2]), (tgt_sr, audio_opt)
+        index_info = (
+            "Using index:%s." % file_index
+            if os.path.exists(file_index)
+            else "Index not used."
+        )
+        return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss" % (
+            index_info,
+            times[0],
+            times[1],
+            times[2],
+        ), (tgt_sr, audio_opt)
     except:
         info = traceback.format_exc()
         print(info)
@@ -213,7 +232,7 @@ def vc_multi(
     index_rate,
     filter_radius,
     resample_sr,
-    rms_mix_rate
+    rms_mix_rate,
 ):
     try:
         dir_path = (
@@ -243,7 +262,7 @@ def vc_multi(
                 index_rate,
                 filter_radius,
                 resample_sr,
-                rms_mix_rate
+                rms_mix_rate,
             )
             if "Success" in info:
                 try:
@@ -342,14 +361,18 @@ def get_vc(sid):
             ###楼下不这么折腾清理不干净
             if_f0 = cpt.get("f0", 1)
             version = cpt.get("version", "v1")
-            if (version == "v1"):
+            if version == "v1":
                 if if_f0 == 1:
-                    net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+                    net_g = SynthesizerTrnMs256NSFsid(
+                        *cpt["config"], is_half=config.is_half
+                    )
                 else:
                     net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-            elif (version == "v2"):
+            elif version == "v2":
                 if if_f0 == 1:
-                    net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+                    net_g = SynthesizerTrnMs768NSFsid(
+                        *cpt["config"], is_half=config.is_half
+                    )
                 else:
                     net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
             del net_g, cpt
@@ -364,12 +387,12 @@ def get_vc(sid):
     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
     if_f0 = cpt.get("f0", 1)
     version = cpt.get("version", "v1")
-    if(version=="v1"):
+    if version == "v1":
         if if_f0 == 1:
             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
         else:
             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-    elif(version=="v2"):
+    elif version == "v2":
         if if_f0 == 1:
             net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
         else:
@@ -396,12 +419,16 @@ def change_choices():
     for name in files:
         if name.endswith(".index") and "trained" not in name:
             index_paths.append("%s/%s" % (root, name))
-    return {"choices": sorted(names), "__type__": "update"},{"choices": sorted(index_paths), "__type__": "update"}
+    return {"choices": sorted(names), "__type__": "update"}, {
+        "choices": sorted(index_paths),
+        "__type__": "update",
+    }


 def clean():
     return {"value": "", "__type__": "update"}


 sr_dict = {
     "32k": 32000,
     "40k": 40000,
@@ -514,7 +541,10 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir,version19):
     leng = len(gpus)
     ps = []
     for idx, n_g in enumerate(gpus):
-        cmd = config.python_cmd + " extract_feature_print.py %s %s %s %s %s/logs/%s %s" % (
+        cmd = (
+            config.python_cmd
+            + " extract_feature_print.py %s %s %s %s %s/logs/%s %s"
+            % (
                 config.device,
                 leng,
                 idx,
@@ -523,6 +553,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir,version19):
                 exp_dir,
                 version19,
             )
+        )
         print(cmd)
         p = Popen(
             cmd, shell=True, cwd=now_dir
@@ -551,17 +582,29 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir,version19):
 def change_sr2(sr2, if_f0_3, version19):
     vis_v = True if sr2 == "40k" else False
-    if(sr2!="40k"):version19="v1"
+    if sr2 != "40k":
+        version19 = "v1"
     path_str = "" if version19 == "v1" else "_v2"
     version_state = {"visible": vis_v, "__type__": "update"}
-    if(vis_v==False):version_state["value"]="v1"
+    if vis_v == False:
+        version_state["value"] = "v1"
     f0_str = "f0" if if_f0_3 else ""
-    return "pretrained%s/%sG%s.pth" % (path_str,f0_str,sr2), "pretrained%s/%sD%s.pth" % (path_str,f0_str,sr2),version_state
+    return (
+        "pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2),
+        "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2),
+        version_state,
+    )


 def change_version19(sr2, if_f0_3, version19):
     path_str = "" if version19 == "v1" else "_v2"
     f0_str = "f0" if if_f0_3 else ""
-    return "pretrained%s/%sG%s.pth" % (path_str,f0_str,sr2), "pretrained%s/%sD%s.pth" % (path_str,f0_str,sr2)
+    return "pretrained%s/%sG%s.pth" % (
+        path_str,
+        f0_str,
+        sr2,
+    ), "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)


 def change_f0(if_f0_3, sr2, version19):  # f0method8,pretrained_G14,pretrained_D15
     path_str = "" if version19 == "v1" else "_v2"
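The wrapped returns in change_sr2 and change_version19 above assemble pretrained checkpoint names from a version suffix, an optional "f0" prefix, and the sample rate. A quick worked check of the pattern (values chosen for illustration):

# Illustrative check of the path pattern built above.
path_str, f0_str, sr2 = "_v2", "f0", "40k"
print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2))  # pretrained_v2/f0G40k.pth
print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2))  # pretrained_v2/f0D40k.pth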
@@ -577,6 +620,7 @@ def change_f0(if_f0_3, sr2,version19): # f0method8,pretrained_G14,pretrained_D1
         "pretrained%s/D%s.pth" % (path_str, sr2),
     )


 # but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
 def click_train(
     exp_dir1,
@@ -598,7 +642,11 @@ def click_train(
     exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
     os.makedirs(exp_dir, exist_ok=True)
     gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
-    feature_dir = "%s/3_feature256" % (exp_dir)if version19=="v1"else "%s/3_feature768" % (exp_dir)
+    feature_dir = (
+        "%s/3_feature256" % (exp_dir)
+        if version19 == "v1"
+        else "%s/3_feature768" % (exp_dir)
+    )
     if if_f0_3:
         f0_dir = "%s/2a_f0" % (exp_dir)
         f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
@@ -709,7 +757,11 @@ def click_train(
 def train_index(exp_dir1, version19):
     exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
     os.makedirs(exp_dir, exist_ok=True)
-    feature_dir = "%s/3_feature256" % (exp_dir)if version19=="v1"else "%s/3_feature768" % (exp_dir)
+    feature_dir = (
+        "%s/3_feature256" % (exp_dir)
+        if version19 == "v1"
+        else "%s/3_feature768" % (exp_dir)
+    )
     if os.path.exists(feature_dir) == False:
         return "请先进行特征提取!"
     listdir_res = list(os.listdir(feature_dir))
@@ -738,7 +790,8 @@ def train_index(exp_dir1,version19):
     index.train(big_npy)
     faiss.write_index(
         index,
-        "%s/trained_IVF%s_Flat_nprobe_%s_%s.index" % (exp_dir, n_ivf, index_ivf.nprobe,version19),
+        "%s/trained_IVF%s_Flat_nprobe_%s_%s.index"
+        % (exp_dir, n_ivf, index_ivf.nprobe, version19),
     )
     # faiss.write_index(index, '%s/trained_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
     infos.append("adding")
@@ -750,7 +803,10 @@ def train_index(exp_dir1,version19):
         index,
         "%s/added_IVF%s_Flat_nprobe_%s.index" % (exp_dir, n_ivf, index_ivf.nprobe),
     )
-    infos.append("成功构建索引added_IVF%s_Flat_nprobe_%s_%s.index" % (n_ivf, index_ivf.nprobe,version19))
+    infos.append(
+        "成功构建索引added_IVF%s_Flat_nprobe_%s_%s.index"
+        % (n_ivf, index_ivf.nprobe, version19)
+    )
     # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
     # infos.append("成功构建索引added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19))
     yield "\n".join(infos)
@@ -786,7 +842,11 @@ def train1key(
     preprocess_log_path = "%s/preprocess.log" % model_log_dir
     extract_f0_feature_log_path = "%s/extract_f0_feature.log" % model_log_dir
     gt_wavs_dir = "%s/0_gt_wavs" % model_log_dir
-    feature_dir = "%s/3_feature256" % model_log_dir if version19=="v1"else "%s/3_feature768" % model_log_dir
+    feature_dir = (
+        "%s/3_feature256" % model_log_dir
+        if version19 == "v1"
+        else "%s/3_feature768" % model_log_dir
+    )
     os.makedirs(model_log_dir, exist_ok=True)
     #########step1:处理数据
@@ -830,7 +890,8 @@ def train1key(
                 leng,
                 idx,
                 n_g,
-                model_log_dir,version19,
+                model_log_dir,
+                version19,
             )
             yield get_info_str(cmd)
             p = Popen(
@@ -981,7 +1042,8 @@ def train1key(
         % (model_log_dir, n_ivf, index_ivf.nprobe, version19),
     )
     yield get_info_str(
-        "成功构建索引, added_IVF%s_Flat_nprobe_%s_%s.index" % (n_ivf, index_ivf.nprobe,version19)
+        "成功构建索引, added_IVF%s_Flat_nprobe_%s_%s.index"
+        % (n_ivf, index_ivf.nprobe, version19)
     )
     yield get_info_str(i18n("全流程结束!"))
@@ -1155,7 +1217,9 @@ with gr.Blocks() as app:
                     choices=sorted(index_paths),
                     interactive=True,
                 )
-                refresh_button.click(fn=change_choices, inputs=[], outputs=[sid0, file_index2])
+                refresh_button.click(
+                    fn=change_choices, inputs=[], outputs=[sid0, file_index2]
+                )
                 # file_big_npy1 = gr.Textbox(
                 #     label=i18n("特征文件路径"),
                 #     value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
@@ -1202,7 +1266,7 @@ with gr.Blocks() as app:
                         index_rate1,
                         filter_radius0,
                         resample_sr0,
-                        rms_mix_rate0
+                        rms_mix_rate0,
                     ],
                     [vc_output1, vc_output2],
                 )
@@ -1293,7 +1357,7 @@ with gr.Blocks() as app:
                        index_rate2,
                        filter_radius1,
                        resample_sr1,
-                       rms_mix_rate1
+                       rms_mix_rate1,
                     ],
                     [vc_output3],
                 )
@@ -1468,9 +1532,7 @@ with gr.Blocks() as app:
                         interactive=True,
                     )
                     if_save_every_weights18 = gr.Radio(
-                        label=i18n(
-                            "是否在每次保存时间点将最终小模型保存至weights文件夹"
-                        ),
+                        label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),
                         choices=[i18n(""), i18n("")],
                         value=i18n(""),
                         interactive=True,
@@ -1487,10 +1549,14 @@ with gr.Blocks() as app:
                         interactive=True,
                     )
                     sr2.change(
-                        change_sr2, [sr2, if_f0_3,version19], [pretrained_G14, pretrained_D15,version19]
+                        change_sr2,
+                        [sr2, if_f0_3, version19],
+                        [pretrained_G14, pretrained_D15, version19],
                     )
                     version19.change(
-                        change_version19, [sr2, if_f0_3,version19], [pretrained_G14, pretrained_D15]
+                        change_version19,
+                        [sr2, if_f0_3, version19],
+                        [pretrained_G14, pretrained_D15],
                     )
                     if_f0_3.change(
                         change_f0,
@@ -1597,7 +1663,16 @@ with gr.Blocks() as app:
                 info4 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
                 but6.click(
                     merge,
-                    [ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0,version_2],
+                    [
+                        ckpt_a,
+                        ckpt_b,
+                        alpha_a,
+                        sr_,
+                        if_f0_,
+                        info__,
+                        name_to_save0,
+                        version_2,
+                    ],
                     info4,
                 )  # def merge(path1,path2,alpha1,sr,f0,info):
             with gr.Group():
@@ -1666,7 +1741,9 @@ with gr.Blocks() as app:
                 )
                 but9 = gr.Button(i18n("提取"), variant="primary")
                 info7 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
-                ckpt_path2.change(change_info_, [ckpt_path2], [sr__, if_f0__,version_1])
+                ckpt_path2.change(
+                    change_info_, [ckpt_path2], [sr__, if_f0__, version_1]
+                )
                 but9.click(
                     extract_small_model,
                     [ckpt_path2, save_name, sr__, if_f0__, info___, version_1],
@@ -1690,13 +1767,13 @@ with gr.Blocks() as app:
         tab_faq = i18n("常见问题解答")
         with gr.TabItem(tab_faq):
             try:
-                if(tab_faq=="常见问题解答"):
-                    with open("docs/faq.md","r",encoding="utf8")as f:info=f.read()
+                if tab_faq == "常见问题解答":
+                    with open("docs/faq.md", "r", encoding="utf8") as f:
+                        info = f.read()
                 else:
-                    with open("docs/faq_en.md", "r")as f:info = f.read()
-                gr.Markdown(
-                    value=info
-                )
+                    with open("docs/faq_en.md", "r") as f:
+                        info = f.read()
+                gr.Markdown(value=info)
             except:
                 gr.Markdown(traceback.format_exc())

View File

@@ -59,6 +59,8 @@ class TextEncoder256(nn.Module):
         m, logs = torch.split(stats, self.out_channels, dim=1)
         return m, logs, x_mask


 class TextEncoder768(nn.Module):
     def __init__(
         self,
@@ -105,6 +107,7 @@ class TextEncoder768(nn.Module):
         m, logs = torch.split(stats, self.out_channels, dim=1)
         return m, logs, x_mask


 class ResidualCouplingBlock(nn.Module):
     def __init__(
         self,
@@ -635,6 +638,8 @@ class SynthesizerTrnMs256NSFsid(nn.Module):
         z = self.flow(z_p, x_mask, g=g, reverse=True)
         o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
         return o, x_mask, (z, z_p, m_p, logs_p)


 class SynthesizerTrnMs768NSFsid(nn.Module):
     def __init__(
         self,
@@ -846,6 +851,8 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
         z = self.flow(z_p, x_mask, g=g, reverse=True)
         o = self.dec((z * x_mask)[:, :, :max_len], g=g)
         return o, x_mask, (z, z_p, m_p, logs_p)


 class SynthesizerTrnMs768NSFsid_nono(nn.Module):
     def __init__(
         self,
@@ -977,6 +984,7 @@ class MultiPeriodDiscriminator(torch.nn.Module):
         return y_d_rs, y_d_gs, fmap_rs, fmap_gs


 class MultiPeriodDiscriminatorV2(torch.nn.Module):
     def __init__(self, use_spectral_norm=False):
         super(MultiPeriodDiscriminatorV2, self).__init__()

View File

@@ -322,7 +322,11 @@ def get_hparams(init=True):
         "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k"
     )
     parser.add_argument(
-        "-sw", "--save_every_weights", type=str, default="0", help="save the extracted model in weights directory when saving checkpoints"
+        "-sw",
+        "--save_every_weights",
+        type=str,
+        default="0",
+        help="save the extracted model in weights directory when saving checkpoints",
     )
     parser.add_argument(
         "-v", "--version", type=str, required=True, help="model version"

View File

@@ -31,7 +31,8 @@ from data_utils import (
     TextAudioCollate,
     DistributedBucketSampler,
 )
-if(hps.version=="v1"):
+
+if hps.version == "v1":
     from infer_pack.models import (
         SynthesizerTrnMs256NSFsid as RVC_Model_f0,
         SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0,
@@ -519,14 +520,25 @@ def train_and_evaluate(
                 epoch,
                 os.path.join(hps.model_dir, "D_{}.pth".format(2333333)),
             )
-        if(rank==0 and hps.save_every_weights=="1"):
+        if rank == 0 and hps.save_every_weights == "1":
             if hasattr(net_g, "module"):
                 ckpt = net_g.module.state_dict()
             else:
                 ckpt = net_g.state_dict()
             logger.info(
                 "saving ckpt %s_e%s:%s"
-                % (hps.name,epoch,savee(ckpt, hps.sample_rate, hps.if_f0, hps.name+"_e%s"%epoch, epoch,hps.version))
+                % (
+                    hps.name,
+                    epoch,
+                    savee(
+                        ckpt,
+                        hps.sample_rate,
+                        hps.if_f0,
+                        hps.name + "_e%s" % epoch,
+                        epoch,
+                        hps.version,
+                    ),
+                )
             )

     if rank == 0:

View File

@@ -9,6 +9,8 @@ from functools import lru_cache
 bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
 input_audio_path2wav = {}


 @lru_cache
 def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
     audio = input_audio_path2wav[input_audio_path]
@@ -22,18 +24,29 @@ def cache_harvest_f0(input_audio_path,fs,f0max,f0min,frame_period):
     f0 = pyworld.stonemask(audio, f0, t, fs)
     return f0


 def change_rms(data1, sr1, data2, sr2, rate):  # 1是输入音频2是输出音频,rate是2的占比
     # print(data1.max(),data2.max())
-    rms1 = librosa.feature.rms(y=data1, frame_length=sr1//2*2, hop_length=sr1//2)#每半秒一个点
+    rms1 = librosa.feature.rms(
+        y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
+    )  # 每半秒一个点
     rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
     rms1 = torch.from_numpy(rms1)
-    rms1=F.interpolate(rms1.unsqueeze(0), size=data2.shape[0],mode='linear').squeeze()
+    rms1 = F.interpolate(
+        rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
+    ).squeeze()
     rms2 = torch.from_numpy(rms2)
-    rms2=F.interpolate(rms2.unsqueeze(0), size=data2.shape[0],mode='linear').squeeze()
+    rms2 = F.interpolate(
+        rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
+    ).squeeze()
     rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
-    data2*=(torch.pow(rms1,torch.tensor(1-rate))*torch.pow(rms2,torch.tensor(rate-1))).numpy()
+    data2 *= (
+        torch.pow(rms1, torch.tensor(1 - rate))
+        * torch.pow(rms2, torch.tensor(rate - 1))
+    ).numpy()
     return data2


 class VC(object):
     def __init__(self, tgt_sr, config):
         self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
@@ -53,7 +66,16 @@ class VC(object):
         self.t_max = self.sr * self.x_max  # 免查询时长阈值
         self.device = config.device

-    def get_f0(self, input_audio_path,x, p_len, f0_up_key, f0_method,filter_radius, inp_f0=None):
+    def get_f0(
+        self,
+        input_audio_path,
+        x,
+        p_len,
+        f0_up_key,
+        f0_method,
+        filter_radius,
+        inp_f0=None,
+    ):
         global input_audio_path2wav
         time_step = self.window / self.sr * 1000
         f0_min = 50
@@ -79,7 +101,7 @@ class VC(object):
         elif f0_method == "harvest":
             input_audio_path2wav[input_audio_path] = x.astype(np.double)
             f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
-            if(filter_radius>2):
+            if filter_radius > 2:
                 f0 = signal.medfilt(f0, 3)
         f0 *= pow(2, f0_up_key / 12)
         # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
@@ -184,10 +206,7 @@ class VC(object):
                 )
             else:
                 audio1 = (
-                    (net_g.infer(feats, p_len, sid)[0][0, 0])
-                    .data.cpu()
-                    .float()
-                    .numpy()
+                    (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
                 )
         del feats, p_len, padding_mask
         if torch.cuda.is_available():
@@ -270,7 +289,15 @@ class VC(object):
         sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
         pitch, pitchf = None, None
         if if_f0 == 1:
-            pitch, pitchf = self.get_f0(input_audio_path,audio_pad, p_len, f0_up_key, f0_method,filter_radius, inp_f0)
+            pitch, pitchf = self.get_f0(
+                input_audio_path,
+                audio_pad,
+                p_len,
+                f0_up_key,
+                f0_method,
+                filter_radius,
+                inp_f0,
+            )
             pitch = pitch[:p_len]
             pitchf = pitchf[:p_len]
             if self.device == "mps":
@@ -347,15 +374,16 @@ class VC(object):
                     )[self.t_pad_tgt : -self.t_pad_tgt]
                 )
         audio_opt = np.concatenate(audio_opt)
-        if(rms_mix_rate!=1):
+        if rms_mix_rate != 1:
             audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
-        if(resample_sr>=16000 and tgt_sr!=resample_sr):
+        if resample_sr >= 16000 and tgt_sr != resample_sr:
            audio_opt = librosa.resample(
                audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
            )
         audio_max = np.abs(audio_opt).max() / 0.99
         max_int16 = 32768
-        if(audio_max>1):max_int16/=audio_max
+        if audio_max > 1:
+            max_int16 /= audio_max
         audio_opt = (audio_opt * max_int16).astype(np.int16)
         del pitch, pitchf, sid
         if torch.cuda.is_available():
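The change_rms hunk near the top of this file is where rms_mix_rate takes effect: the converted audio's loudness envelope is blended between the source and the converted signal. A whole-clip simplification of that idea (illustrative only; the committed function works frame-by-frame with librosa RMS curves and torch interpolation):

# Illustrative, whole-clip simplification of change_rms; rate=1 keeps the
# converted audio's loudness, rate=0 matches it to the source's loudness.
import numpy as np

def mix_rms(source, converted, rate):
    eps = 1e-6
    rms_src = np.sqrt(np.mean(source**2)) + eps
    rms_out = np.sqrt(np.mean(converted**2)) + eps
    return converted * (rms_src ** (1 - rate)) * (rms_out ** (rate - 1))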