import librosa
import numpy as np
import onnxruntime
import soundfile


class ContentVec:
    def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
        print("Load model(s) from {}".format(vec_path))
        if device == "cpu" or device is None:
            providers = ["CPUExecutionProvider"]
        elif device == "cuda":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        elif device == "dml":
            providers = ["DmlExecutionProvider"]
        else:
            raise RuntimeError("Unsupported device")
        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)

    def __call__(self, wav):
        return self.forward(wav)

    def forward(self, wav):
        feats = wav
        if feats.ndim == 2:  # stereo input: average the channels down to mono
            feats = feats.mean(-1)
        assert feats.ndim == 1, feats.ndim
        # the ONNX graph expects a (1, 1, n_samples) tensor
        feats = np.expand_dims(np.expand_dims(feats, 0), 0)
        onnx_input = {self.model.get_inputs()[0].name: feats}
        logits = self.model.run(None, onnx_input)[0]
        # return features as (batch, dim, n_frames)
        return logits.transpose(0, 2, 1)
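

# Illustrative usage sketch (not part of the original module): extracting
# ContentVec features from a 16 kHz mono clip. The .onnx path is this
# module's default; the audio file name is a placeholder.
#
#   vec = ContentVec("pretrained/vec-768-layer-12.onnx", device="cpu")
#   wav16k, _ = librosa.load("sample.wav", sr=16000, mono=True)
#   feats = vec(wav16k)  # (1, feature_dim, n_frames), e.g. 768 for vec-768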


def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kwargs):
    # Extra keyword arguments (e.g. threshold) are accepted for interface
    # compatibility but are not forwarded to the predictors below.
    if f0_predictor == "pm":
        from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

        f0_predictor_object = PMF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    elif f0_predictor == "harvest":
        from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
            HarvestF0Predictor,
        )

        f0_predictor_object = HarvestF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    elif f0_predictor == "dio":
        from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

        f0_predictor_object = DioF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    else:
        raise Exception("Unknown f0 predictor")
    return f0_predictor_object


class OnnxRVC:
    def __init__(
        self,
        model_path,
        sr=40000,
        hop_size=512,
        vec_path="vec-768-layer-12",
        device="cpu",
    ):
        # ContentVec weights are resolved relative to the pretrained/ directory
        vec_path = f"pretrained/{vec_path}.onnx"
        self.vec_model = ContentVec(vec_path, device)
        if device == "cpu" or device is None:
            providers = ["CPUExecutionProvider"]
        elif device == "cuda":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        elif device == "dml":
            providers = ["DmlExecutionProvider"]
        else:
            raise RuntimeError("Unsupported device")
        self.model = onnxruntime.InferenceSession(model_path, providers=providers)
        self.sampling_rate = sr
        self.hop_size = hop_size

    def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
        # The exported graph takes six inputs, in this order: ContentVec
        # features, their frame length, coarse pitch, continuous f0,
        # speaker id, and the random latent noise.
        onnx_input = {
            self.model.get_inputs()[0].name: hubert,
            self.model.get_inputs()[1].name: hubert_length,
            self.model.get_inputs()[2].name: pitch,
            self.model.get_inputs()[3].name: pitchf,
            self.model.get_inputs()[4].name: ds,
            self.model.get_inputs()[5].name: rnd,
        }
        # scale the float waveform to 16-bit PCM
        return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)

    def inference(
        self,
        raw_path,
        sid,
        f0_method="dio",
        f0_up_key=0,
        pad_time=0.5,
        cr_threshold=0.02,
    ):
        f0_min = 50
        f0_max = 1100
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        f0_predictor = get_f0_predictor(
            f0_method,
            hop_length=self.hop_size,
            sampling_rate=self.sampling_rate,
            threshold=cr_threshold,
        )
        wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
        org_length = len(wav)
        # hard limit: refuse clips longer than 50 seconds
        if org_length / sr > 50.0:
            raise RuntimeError("Reached Max Length")

        # extract ContentVec features from a 16 kHz copy of the input
        wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)

        hubert = self.vec_model(wav16k)
        hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
        hubert_length = hubert.shape[1]

        # predict f0, apply the semitone shift, then quantize a copy to the
        # 1-255 coarse mel scale expected by the pitch embedding
        pitchf = f0_predictor.compute_f0(wav, hubert_length)
        pitchf = pitchf * 2 ** (f0_up_key / 12)
        pitch = pitchf.copy()
        f0_mel = 1127 * np.log(1 + pitch / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
            f0_mel_max - f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        pitch = np.rint(f0_mel).astype(np.int64)

        pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
        pitch = pitch.reshape(1, len(pitch))
        ds = np.array([sid]).astype(np.int64)

        rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
        hubert_length = np.array([hubert_length]).astype(np.int64)

        out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
        out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
        return out_wav[0:org_length]
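

# Minimal usage sketch (illustrative, not part of the original module): the
# model and audio paths below are placeholders, and an RVC ONNX export with
# the six inputs listed in OnnxRVC.forward() is assumed.
if __name__ == "__main__":
    model = OnnxRVC(
        "pretrained/your_rvc_model.onnx",  # hypothetical exported model path
        sr=40000,
        hop_size=512,
        vec_path="vec-768-layer-12",
        device="cpu",
    )
    audio = model.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)
    # int16 output at the model's sampling rate; write it out with soundfile
    soundfile.write("output.wav", audio, model.sampling_rate)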