Add files via upload

parent 605fbe118d
commit a36ff01be1
@@ -45,7 +45,8 @@ class FeatureInput(object):
 f0, t = pyworld.harvest(
     x.astype(np.double),
     fs=sr,
-    f0_ceil=1100,
+    f0_ceil=self.f0_max,
+    f0_floor=self.f0_min,
     frame_period=1000 * self.hop / sr,
 )
 f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.fs)
@@ -53,7 +54,8 @@ class FeatureInput(object):
 f0, t = pyworld.dio(
     x.astype(np.double),
     fs=sr,
-    f0_ceil=1100,
+    f0_ceil=self.f0_max,
+    f0_floor=self.f0_min,
     frame_period=1000 * self.hop / sr,
 )
 f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.fs)
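For context, a minimal standalone sketch of the WORLD pitch-extraction pattern these two hunks parametrize: harvest or dio produces a coarse F0 track, stonemask refines it against the waveform. The helper name and the default hop size and F0 bounds below are illustrative choices, not the project's configuration.

import numpy as np
import pyworld

def compute_f0(x, fs, hop=160, f0_min=50.0, f0_max=1100.0, method="harvest"):
    # Coarse F0 estimation; frame_period is milliseconds per analysis frame.
    extractor = pyworld.harvest if method == "harvest" else pyworld.dio
    f0, t = extractor(
        x.astype(np.double),
        fs=fs,
        f0_floor=f0_min,
        f0_ceil=f0_max,
        frame_period=1000 * hop / fs,
    )
    # Refine the coarse track using the temporal positions returned above.
    return pyworld.stonemask(x.astype(np.double), f0, t, fs)

# Usage sketch: one second of silence, just to show the call shape.
f0 = compute_f0(np.zeros(16000), fs=16000)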
@@ -1,5 +1,5 @@
 import os,sys,traceback
-device=sys.argv[1]
+# device=sys.argv[1]
 n_part=int(sys.argv[2])
 i_part=int(sys.argv[3])
 if len(sys.argv) == 5:
@@ -14,6 +14,7 @@ import torch.nn.functional as F
 import soundfile as sf
 import numpy as np
 from fairseq import checkpoint_utils
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

 f = open("%s/extract_f0_feature.log"%exp_dir, "a+")
 def printt(strr):
@@ -48,7 +49,7 @@ models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
 )
 model = models[0]
 model = model.to(device)
-printt("move model to "+device)
+printt("move model to %s"%device)
 if device != "cpu": model = model.half()
 model.eval()

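A minimal sketch of the device-selection pattern the two hunks above converge on: pick CUDA when available, move the model, and use half precision only off-CPU. The tiny nn.Linear is a placeholder for the checkpoint the real script loads through fairseq.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = torch.nn.Linear(8, 8)       # placeholder for the real model
model = model.to(device)
print("move model to %s" % device)  # %-formatting works for a torch.device object, unlike "+" concatenation
if device.type != "cpu":
    model = model.half()            # fp16 only makes sense on GPU
model.eval()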
go-web.bat (new file, 1 line)
@@ -0,0 +1 @@
+runtime\python.exe infer-web.py --pycmd runtime\python.exe
infer-web.py (30 lines changed)
@@ -5,6 +5,16 @@ from subprocess import Popen
 from time import sleep
 import torch, os,traceback,sys,warnings,shutil,numpy as np
 import faiss
+now_dir=os.getcwd()
+sys.path.append(now_dir)
+tmp=os.path.join(now_dir,"TEMP")
+shutil.rmtree(tmp,ignore_errors=True)
+os.makedirs(tmp,exist_ok=True)
+os.makedirs(os.path.join(now_dir,"logs"),exist_ok=True)
+os.makedirs(os.path.join(now_dir,"weights"),exist_ok=True)
+os.environ["TEMP"]=tmp
+warnings.filterwarnings("ignore")
+torch.manual_seed(114514)
 from webui_locale import I18nAuto
 i18n = I18nAuto()
 #判断是否有能用来训练和加速推理的N卡 (check whether an NVIDIA GPU is available for training and accelerated inference)
@@ -22,16 +32,6 @@ else:
 gpu_infos.append("%s\t%s"%(i,gpu_name))
 gpu_info="\n".join(gpu_infos)if if_gpu_ok==True and len(gpu_infos)>0 else "很遗憾您这没有能用的显卡来支持您训练"
 gpus="-".join([i[0]for i in gpu_infos])
-now_dir=os.getcwd()
-sys.path.append(now_dir)
-tmp=os.path.join(now_dir,"TEMP")
-shutil.rmtree(tmp,ignore_errors=True)
-os.makedirs(tmp,exist_ok=True)
-os.makedirs(os.path.join(now_dir,"logs"),exist_ok=True)
-os.makedirs(os.path.join(now_dir,"weights"),exist_ok=True)
-os.environ["TEMP"]=tmp
-warnings.filterwarnings("ignore")
-torch.manual_seed(114514)
 from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
 from scipy.io import wavfile
 from fairseq import checkpoint_utils
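For clarity, a self-contained sketch of the idempotent start-up directory setup that these two hunks relocate ahead of the webui_locale import. Directory names and the seed mirror the diff; the final print is illustrative only.

import os, shutil, warnings
import torch

now_dir = os.getcwd()
tmp = os.path.join(now_dir, "TEMP")

# Recreate a clean TEMP directory on every start: ignore_errors makes the removal
# a no-op when it does not exist yet, exist_ok makes re-creation safe.
shutil.rmtree(tmp, ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "weights"), exist_ok=True)
os.environ["TEMP"] = tmp          # point libraries that honor TEMP at the private dir

warnings.filterwarnings("ignore")
torch.manual_seed(114514)         # fixed seed, as in the diff
print("temporary files go to", tmp)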
@@ -563,7 +563,7 @@ with gr.Blocks() as app:
 total_epoch11 = gr.Slider(minimum=0, maximum=1000, step=1, label=i18n("总训练轮数total_epoch"), value=20,interactive=True)
 batch_size12 = gr.Slider(minimum=0, maximum=32, step=1, label='每张显卡的batch_size', value=4,interactive=True)
 if_save_latest13 = gr.Radio(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), choices=["是", "否"], value="否", interactive=True)
-if_cache_gpu17 = gr.Radio(label=i18n("是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速"), choices=["是", "否"], value="否", interactive=True)
+if_cache_gpu17 = gr.Radio(label=i18n("是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速"), choices=["是", "否"], value="是", interactive=True)
 with gr.Row():
     pretrained_G14 = gr.Textbox(label=i18n("加载预训练底模G路径"), value="pretrained/f0G40k.pth",interactive=True)
     pretrained_D15 = gr.Textbox(label=i18n("加载预训练底模D路径"), value="pretrained/f0D40k.pth",interactive=True)
@@ -624,10 +624,10 @@ with gr.Blocks() as app:
 ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
 but9.click(extract_small_model, [ckpt_path2,save_name,sr__,if_f0__,info___], info7)

-with gr.TabItem(i18n("招募音高曲线前端编辑器")):
-    gr.Markdown(value=i18n("加开发群联系我xxxxx"))
-with gr.TabItem(i18n("点击查看交流、问题反馈群号")):
-    gr.Markdown(value=i18n("xxxxx"))
+# with gr.TabItem(i18n("招募音高曲线前端编辑器")):
+# gr.Markdown(value=i18n("加开发群联系我xxxxx"))
+# with gr.TabItem(i18n("点击查看交流、问题反馈群号")):
+# gr.Markdown(value=i18n("xxxxx"))

 if iscolab:
     app.queue(concurrency_count=511, max_size=1022).launch(share=True)
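For reference, a minimal runnable sketch of the conditional launch pattern visible at the end of this hunk: queue incoming requests and expose a public share link when running on Colab. The iscolab flag and the demo contents are stand-ins, not the project's code; the queue arguments mirror the diff and assume the same Gradio 3.x API it uses.

import gradio as gr

iscolab = False  # hypothetical flag; infer-web.py derives it from its own options

with gr.Blocks() as app:
    gr.Markdown("placeholder UI")

# share=True creates a temporary public URL, which is needed on Colab where
# localhost is not reachable from the user's browser.
app.queue(concurrency_count=511, max_size=1022).launch(share=iscolab)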
@@ -15,6 +15,13 @@ import multiprocessing
 from my_utils import load_audio

+mutex = multiprocessing.Lock()
+f = open("%s/preprocess.log"%exp_dir, "a+")
+def println(strr):
+    mutex.acquire()
+    print(strr)
+    f.write("%s\n" % strr)
+    f.flush()
+    mutex.release()

 class PreProcess():
     def __init__(self,sr,exp_dir):
@@ -35,18 +42,10 @@ class PreProcess():
 self.exp_dir=exp_dir
 self.gt_wavs_dir="%s/0_gt_wavs"%exp_dir
 self.wavs16k_dir="%s/1_16k_wavs"%exp_dir
-self.f = open("%s/preprocess.log"%exp_dir, "a+")
 os.makedirs(self.exp_dir,exist_ok=True)
 os.makedirs(self.gt_wavs_dir,exist_ok=True)
 os.makedirs(self.wavs16k_dir,exist_ok=True)

-def println(self, strr):
-    mutex.acquire()
-    print(strr)
-    self.f.write("%s\n" % strr)
-    self.f.flush()
-    mutex.release()
-
 def norm_write(self,tmp_audio,idx0,idx1):
     tmp_audio = (tmp_audio / np.abs(tmp_audio).max() * (self.max * self.alpha)) + (1 - self.alpha) * tmp_audio
     wavfile.write("%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1), self.sr, (tmp_audio*32768).astype(np.int16))
@@ -70,9 +69,9 @@ class PreProcess():
 tmp_audio = audio[start:]
 break
 self.norm_write(tmp_audio, idx0, idx1)
-self.println("%s->Suc."%path)
+println("%s->Suc."%path)
 except:
-    self.println("%s->%s"%(path,traceback.format_exc()))
+    println("%s->%s"%(path,traceback.format_exc()))

 def pipeline_mp(self,infos):
     for path, idx0 in infos:
@@ -91,14 +90,14 @@ class PreProcess():
 ps.append(p)
 for p in ps:p.join()
 except:
-    self.println("Fail. %s"%traceback.format_exc())
+    println("Fail. %s"%traceback.format_exc())

 def preprocess_trainset(inp_root, sr, n_p, exp_dir):
     pp=PreProcess(sr,exp_dir)
-    pp.println("start preprocess")
-    pp.println(sys.argv)
+    println("start preprocess")
+    println(sys.argv)
     pp.pipeline_mp_inp_dir(inp_root,n_p)
-    pp.println("end preprocess")
+    println("end preprocess")

 if __name__=='__main__':
     preprocess_trainset(inp_root, sr, n_p, exp_dir)
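The module-level println these hunks switch to guards log writes with a multiprocessing lock so lines from different worker processes do not interleave. Below is a minimal, self-contained sketch of that pattern; the log path is hypothetical, the try/finally is my addition rather than the project's code, and sharing the lock this way assumes the fork start method, where children inherit the parent's lock and file handle.

import multiprocessing

mutex = multiprocessing.Lock()           # inherited by fork-started worker processes
log_file = open("preprocess.log", "a+")  # hypothetical path for this sketch

def println(strr):
    # Serialize console and file output across processes.
    mutex.acquire()
    try:
        print(strr)
        log_file.write("%s\n" % strr)
        log_file.flush()
    finally:
        mutex.release()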
@@ -35,6 +35,7 @@ class VC(object):
     x.astype(np.double),
     fs=self.sr,
     f0_ceil=f0_max,
+    f0_floor=f0_min,
     frame_period=10,
 )
 f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
@@ -13,7 +13,7 @@ class I18nAuto:
 if language == 'auto':
     language = locale.getdefaultlocale()[0]
 self.language = language
-print("Use Languane:", language)
+print("Use Language:", language)
 self.language_map = load_language_list(language)

 def __call__(self, key):
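A minimal sketch of the locale auto-detection and key-lookup pattern this class implements. The load_language_list body, its JSON path, and the fallback in __call__ are my assumptions for illustration; only the constructor logic mirrors the diff.

import locale, json

def load_language_list(language):
    # Hypothetical loader: one JSON file per language, mapping source keys to translations.
    with open("lib/i18n/%s.json" % language, "r", encoding="utf-8") as f:
        return json.load(f)

class I18nAuto:
    def __init__(self, language="auto"):
        if language == "auto":
            # e.g. "zh_CN" or "en_US"; may be None in some environments.
            language = locale.getdefaultlocale()[0]
        self.language = language
        print("Use Language:", language)
        self.language_map = load_language_list(language)

    def __call__(self, key):
        # Fall back to the key itself when no translation is available.
        return self.language_map.get(key, key)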