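"""Export a SynthesizerTrnMs256NSFsid checkpoint to an ONNX model.

The dummy tensors below are used only to trace the graph; the time axes are
marked dynamic so the exported model accepts variable-length input.
"""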
from infer_pack.models_onnx_moess import SynthesizerTrnMs256NSFsidM
from infer_pack.models_onnx import SynthesizerTrnMs256NSFsidO
import torch

if __name__ == "__main__":
    MoeVS = True  # whether the model is to be used with MoeVoiceStudio (formerly MoeSS)

    ModelPath = "Shiroha/shiroha.pth"  # model path
    ExportedPath = "model.onnx"  # output path
    hidden_channels = 256  # hidden_channels; in preparation for 768Vec
    cpt = torch.load(ModelPath, map_location="cpu")
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk: infer speaker count from the embedding table
    print(*cpt["config"])
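
    # Dummy inputs used only to trace the graph: batch size 1, 200 frames.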
    test_phone = torch.rand(1, 200, hidden_channels)  # hidden units
    test_phone_lengths = torch.tensor([200]).long()  # hidden unit length (seemingly unused)
    test_pitch = torch.randint(size=(1, 200), low=5, high=255)  # fundamental frequency (in Hz)
    test_pitchf = torch.rand(1, 200)  # NSF fundamental frequency
    test_ds = torch.LongTensor([0])  # speaker ID
    test_rnd = torch.rand(1, 192, 200)  # noise (adds a random factor)

    device = "cpu"  # device used during export (does not affect how the model is used)

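    # The MoeVoiceStudio variant takes an extra "rnd" noise input; the plain
    # ONNX variant's input signature omits it.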
    if MoeVS:
        net_g = SynthesizerTrnMs256NSFsidM(
            *cpt["config"], is_half=False
        )  # fp32 export (supporting fp16 in C++ would require manually rearranging memory, so fp16 is not used for now)
        net_g.load_state_dict(cpt["weight"], strict=False)
        input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
        output_names = [
            "audio",
        ]
        torch.onnx.export(
            net_g,
            (
                test_phone.to(device),
                test_phone_lengths.to(device),
                test_pitch.to(device),
                test_pitchf.to(device),
                test_ds.to(device),
                test_rnd.to(device),
            ),
            ExportedPath,
            dynamic_axes={
                # Time axes are dynamic so sequence length can vary at inference time.
                "phone": [1],
                "pitch": [1],
                "pitchf": [1],
                "rnd": [2],
            },
            do_constant_folding=False,
            opset_version=16,
            verbose=False,
            input_names=input_names,
            output_names=output_names,
        )
    else:
        net_g = SynthesizerTrnMs256NSFsidO(
            *cpt["config"], is_half=False
        )  # fp32 export (supporting fp16 in C++ would require manually rearranging memory, so fp16 is not used for now)
        net_g.load_state_dict(cpt["weight"], strict=False)
        input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds"]
        output_names = [
            "audio",
        ]
        torch.onnx.export(
            net_g,
            (
                test_phone.to(device),
                test_phone_lengths.to(device),
                test_pitch.to(device),
                test_pitchf.to(device),
                test_ds.to(device),
            ),
            ExportedPath,
            dynamic_axes={
                "phone": [1],
                "pitch": [1],
                "pitchf": [1],
            },
            do_constant_folding=False,
            opset_version=16,
            verbose=False,
            input_names=input_names,
            output_names=output_names,
        )
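
# A minimal sketch of smoke-testing the exported graph with onnxruntime
# (an illustrative assumption, not part of the original script; it presumes
# the `onnxruntime` and `numpy` packages are installed, and the feed names
# match the input_names used for export above):
#
#     import numpy as np
#     import onnxruntime as ort
#
#     sess = ort.InferenceSession("model.onnx")
#     feeds = {
#         "phone": np.random.rand(1, 200, 256).astype(np.float32),
#         "phone_lengths": np.array([200], dtype=np.int64),
#         "pitch": np.random.randint(5, 255, size=(1, 200), dtype=np.int64),
#         "pitchf": np.random.rand(1, 200).astype(np.float32),
#         "ds": np.array([0], dtype=np.int64),
#         "rnd": np.random.rand(1, 192, 200).astype(np.float32),  # MoeVS variant only
#     }
#     (audio,) = sess.run(["audio"], feeds)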