
fix: all logger format

according to #1159
源文雨 2023-09-02 12:09:19 +08:00
parent dace5a6f99
commit b3f22dcdef
8 changed files with 33 additions and 83 deletions
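For reference, the pattern applied across all eight files is the standard logging idiom: pass a printf-style format string plus arguments instead of building the message eagerly with str() and +, so interpolation only happens when a record is actually handled. A minimal sketch, with an illustrative value rather than code from the repository:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

total_time = 0.1234  # stand-in for a measured duration

# Before: the message string is built even if INFO is disabled.
logger.info("Infer time:" + str(total_time))

# After: logging applies "Infer time: %.2f" % (total_time,) lazily,
# only when a handler actually emits the record.
logger.info("Infer time: %.2f", total_time)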

View File

@@ -358,7 +358,7 @@ if __name__ == "__main__":
)
if event == "start_vc" and self.flag_vc == False:
if self.set_values(values) == True:
logger.info("Use CUDA:" + str(torch.cuda.is_available()))
logger.info("Use CUDA: %b", torch.cuda.is_available())
self.start_vc()
settings = {
"pth_path": values["pth_path"],
@@ -625,7 +625,7 @@ if __name__ == "__main__":
sola_offset = sola_offset.item()
else:
sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
logger.debug("sola_offset =" + str(int(sola_offset)))
logger.debug("sola_offset = %d", int(sola_offset))
self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame]
self.output_wav[: self.crossfade_frame] *= self.fade_in_window
self.output_wav[: self.crossfade_frame] += self.sola_buffer[:]
@@ -665,7 +665,7 @@ if __name__ == "__main__":
outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy()
total_time = time.perf_counter() - start_time
self.window["infer_time"].update(int(total_time * 1000))
logger.info("Infer time:" + str(total_time))
logger.info("Infer time: %.2f", total_time)
def get_devices(self, update: bool = True):
"""获取设备列表"""
@@ -719,10 +719,10 @@ if __name__ == "__main__":
output_devices.index(output_device)
]
logger.info(
"Input device:" + str(sd.default.device[0]) + ":" + str(input_device)
"Input device: %s:%d", str(sd.default.device[0]), input_device
)
logger.info(
"Output device:" + str(sd.default.device[1]) + ":" + str(output_device)
"Output device: %s:%d", str(sd.default.device[1]), output_device
)
gui = GUI()
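A note on the conversion characters used above: %s renders any object through str(), so it is the safe choice for booleans, tensors, and device names; %d expects a number and %.2f formats a float to two decimals, while an unknown character such as %b makes the call fail at emit time. A short sketch with made-up device values (not taken from sounddevice):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("gui")

cuda_available = True                                     # e.g. torch.cuda.is_available()
device_index, device_name = 1, "Microphone (USB Audio)"   # illustrative values

logger.info("Use CUDA: %s", cuda_available)    # %s accepts a bool
logger.info("Input device: %s:%s", device_index, device_name)
logger.debug("sola_offset = %d", 42)           # %d needs a numeric argument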

View File

@@ -370,9 +370,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp
yield log
def change_sr2(sr2, if_f0_3, version19):
path_str = "" if version19 == "v1" else "_v2"
f0_str = "f0" if if_f0_3 else ""
def get_pretrained_models(path_str, f0_str, sr2):
if_pretrained_generator_exist = os.access(
"assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
)
@@ -381,13 +379,13 @@ def change_sr2(sr2, if_f0_3, version19):
)
if not if_pretrained_generator_exist:
logger.warn(
"assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2),
"not exist, will not use pretrained model",
"assets/pretrained%s/%sG%s.pth not exist, will not use pretrained model",
path_str, f0_str, sr2
)
if not if_pretrained_discriminator_exist:
logger.warn(
"assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2),
"not exist, will not use pretrained model",
"assets/pretrained%s/%sD%s.pth not exist, will not use pretrained model",
path_str, f0_str, sr2
)
return (
"assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
@@ -399,6 +397,12 @@ def change_sr2(sr2, if_f0_3, version19):
)
def change_sr2(sr2, if_f0_3, version19):
path_str = "" if version19 == "v1" else "_v2"
f0_str = "f0" if if_f0_3 else ""
return get_pretrained_models(path_str, f0_str, sr2)
def change_version19(sr2, if_f0_3, version19):
path_str = "" if version19 == "v1" else "_v2"
if sr2 == "32k" and version19 == "v1":
@@ -409,72 +413,18 @@ def change_version19(sr2, if_f0_3, version19):
else {"choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2}
)
f0_str = "f0" if if_f0_3 else ""
if_pretrained_generator_exist = os.access(
"assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
)
if_pretrained_discriminator_exist = os.access(
"assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK
)
if not if_pretrained_generator_exist:
logger.warn(
"assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2),
"not exist, will not use pretrained model",
)
if not if_pretrained_discriminator_exist:
logger.warn(
"assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2),
"not exist, will not use pretrained model",
)
return (
"assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
if if_pretrained_generator_exist
else "",
"assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
if if_pretrained_discriminator_exist
else "",
*get_pretrained_models(path_str, f0_str, sr2),
to_return_sr2,
)
def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15
path_str = "" if version19 == "v1" else "_v2"
if_pretrained_generator_exist = os.access(
"assets/pretrained%s/f0G%s.pth" % (path_str, sr2), os.F_OK
)
if_pretrained_discriminator_exist = os.access(
"assets/pretrained%s/f0D%s.pth" % (path_str, sr2), os.F_OK
)
if not if_pretrained_generator_exist:
logger.warn(
"assets/pretrained%s/f0G%s.pth" % (path_str, sr2),
"not exist, will not use pretrained model",
)
if not if_pretrained_discriminator_exist:
logger.warn(
"assets/pretrained%s/f0D%s.pth" % (path_str, sr2),
"not exist, will not use pretrained model",
)
if if_f0_3:
return (
{"visible": True, "__type__": "update"},
"assets/pretrained%s/f0G%s.pth" % (path_str, sr2)
if if_pretrained_generator_exist
else "",
"assets/pretrained%s/f0D%s.pth" % (path_str, sr2)
if if_pretrained_discriminator_exist
else "",
)
return (
{"visible": False, "__type__": "update"},
("assets/pretrained%s/G%s.pth" % (path_str, sr2))
if if_pretrained_generator_exist
else "",
("assets/pretrained%s/D%s.pth" % (path_str, sr2))
if if_pretrained_discriminator_exist
else "",
{"visible": if_f0_3, "__type__": "update"}, *get_pretrained_models(path_str, "f0", sr2)
)
# but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
def click_train(
exp_dir1,
@@ -561,7 +511,7 @@ def click_train(
logger.debug("Write filelist done")
# Generate config  # no need to generate config
# cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
logger.info("Use gpus:", gpus16)
logger.info("Use gpus: %s", str(gpus16))
if pretrained_G14 == "":
logger.info("No pretrained Generator")
if pretrained_D15 == "":
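Beyond the message formatting, the hunks above also collapse three copies of the pretrained-model existence check into a single helper that change_sr2, change_version19, and change_f0 delegate to. A condensed sketch of that shape, with the two per-file checks folded into a loop for brevity (a simplification, not the repository's exact code):

import logging
import os

logger = logging.getLogger(__name__)


def get_pretrained_models(path_str, f0_str, sr2):
    # Return the generator/discriminator paths, or "" when a file is absent.
    paths = []
    for kind in ("G", "D"):
        path = "assets/pretrained%s/%s%s%s.pth" % (path_str, f0_str, kind, sr2)
        if not os.access(path, os.F_OK):
            logger.warning("%s not exist, will not use pretrained model", path)
            path = ""
        paths.append(path)
    return tuple(paths)


def change_sr2(sr2, if_f0_3, version19):
    path_str = "" if version19 == "v1" else "_v2"
    f0_str = "f0" if if_f0_3 else ""
    return get_pretrained_models(path_str, f0_str, sr2)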

View File

@@ -617,7 +617,7 @@ class SynthesizerTrnMs256NSFsid(nn.Module):
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
"gin_channels: " + gin_channels + ", self.spk_embed_dim: " + self.spk_embed_dim
)
def remove_weight_norm(self):
@@ -735,7 +735,7 @@ class SynthesizerTrnMs768NSFsid(nn.Module):
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
"gin_channels: " + gin_channels + ", self.spk_embed_dim: " + self.spk_embed_dim
)
def remove_weight_norm(self):
@@ -850,7 +850,7 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
"gin_channels: " + gin_channels + ", self.spk_embed_dim: " + self.spk_embed_dim
)
def remove_weight_norm(self):
@@ -958,7 +958,7 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
"gin_channels: " + gin_channels + ", self.spk_embed_dim: " + self.spk_embed_dim
)
def remove_weight_norm(self):
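What the models.py hunks fix is a print-style call: logging does not join extra positional arguments, it %-merges them into the first one, so logger.debug("gin_channels:", 256, ...) never prints the values and instead produces a "--- Logging error ---" report from the handler. A small self-contained illustration with made-up channel sizes:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("models")

gin_channels, spk_embed_dim = 256, 109  # illustrative values

# Broken: logging evaluates "gin_channels:" % (256, "self.spk_embed_dim:", 109),
# which raises, so a logging-error traceback is printed instead of the values.
logger.debug("gin_channels:", gin_channels, "self.spk_embed_dim:", spk_embed_dim)

# Working: build the string explicitly, or let logging interpolate placeholders.
logger.debug("gin_channels: " + str(gin_channels) + ", self.spk_embed_dim: " + str(spk_embed_dim))
logger.debug("gin_channels: %d, self.spk_embed_dim: %d", gin_channels, spk_embed_dim)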

View File

@@ -621,7 +621,7 @@ class SynthesizerTrnMsNSFsidM(nn.Module):
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
self.speaker_map = None
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
"gin_channels: " + gin_channels + ", self.spk_embed_dim: " + self.spk_embed_dim
)
def remove_weight_norm(self):

View File

@@ -695,4 +695,4 @@ if __name__ == "__main__":
# f0 = rmvpe.infer_from_audio(audio, thred=thred)
# f0 = rmvpe.infer_from_audio(audio, thred=thred)
t1 = ttime()
logger.info(f0.shape, t1 - t0)
logger.info("%s %.2f", f0.shape, t1 - t0)

View File

@@ -113,7 +113,7 @@ class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset):
try:
spec = torch.load(spec_filename)
except:
logger.warn(spec_filename, traceback.format_exc())
logger.warn("%s %s", spec_filename, traceback.format_exc())
spec = spectrogram_torch(
audio_norm,
self.filter_length,
@@ -305,7 +305,7 @@ class TextAudioLoader(torch.utils.data.Dataset):
try:
spec = torch.load(spec_filename)
except:
logger.warn(spec_filename, traceback.format_exc())
logger.warn("%s %s", spec_filename, traceback.format_exc())
spec = spectrogram_torch(
audio_norm,
self.filter_length,

View File

@@ -54,9 +54,9 @@ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False)
"""
# Validation
if torch.min(y) < -1.07:
logger.debug("min value is ", torch.min(y))
logger.debug("min value is %s", str(torch.min(y)))
if torch.max(y) > 1.07:
logger.debug("max value is ", torch.max(y))
logger.debug("max value is %s", str(torch.max(y)))
# Window - Cache if needed
global hann_window

View File

@@ -35,12 +35,12 @@ def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1):
if saved_state_dict[k].shape != state_dict[k].shape:
logger.warn(
"shape-%s-mismatch. need: %s, get: %s"
% (k, state_dict[k].shape, saved_state_dict[k].shape)
, k, state_dict[k].shape, saved_state_dict[k].shape
) #
raise KeyError
except:
# logger.info(traceback.format_exc())
logger.info("%s is not in the checkpoint" % k) # pretrain缺失的
logger.info("%s is not in the checkpoint", k) # pretrain缺失的
new_state_dict[k] = v # 模型自带的随机值
if hasattr(model, "module"):
model.module.load_state_dict(new_state_dict, strict=False)
@@ -111,12 +111,12 @@ def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
if saved_state_dict[k].shape != state_dict[k].shape:
logger.warn(
"shape-%s-mismatch|need-%s|get-%s"
% (k, state_dict[k].shape, saved_state_dict[k].shape)
, k, state_dict[k].shape, saved_state_dict[k].shape
) #
raise KeyError
except:
# logger.info(traceback.format_exc())
logger.info("%s is not in the checkpoint" % k) # pretrain缺失的
logger.info("%s is not in the checkpoint", k) # pretrain缺失的
new_state_dict[k] = v # 模型自带的随机值
if hasattr(model, "module"):
model.module.load_state_dict(new_state_dict, strict=False)
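For context, the two utils.py hunks sit inside a tolerant state-dict loader: tensors with matching shapes are copied from the checkpoint, while missing or shape-mismatched keys fall back to the model's freshly initialized values and are only logged. A condensed sketch of that pattern with simplified names, not the module's exact signatures:

import logging

logger = logging.getLogger(__name__)


def load_partial_state(model, saved_state_dict):
    state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            if saved_state_dict[k].shape != v.shape:
                logger.warning(
                    "shape-%s-mismatch|need-%s|get-%s",
                    k, v.shape, saved_state_dict[k].shape,
                )
                raise KeyError(k)
            new_state_dict[k] = saved_state_dict[k]
        except KeyError:
            logger.info("%s is not in the checkpoint", k)
            new_state_dict[k] = v  # keep the model's randomly initialized tensor
    model.load_state_dict(new_state_dict, strict=False)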