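"""Runtime configuration for the Retrieval-based-Voice-Conversion WebUI.

Parses command-line flags and picks a compute device (CUDA, MPS, DirectML,
or CPU) together with a matching inference precision (fp16 or fp32).
"""
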
import argparse
import os
import sys
from multiprocessing import cpu_count

import torch


def use_fp32_config():
    # Rewrite every training config from fp16 ("true") to fp32 ("false"),
    # and lower the 3.7 constant in the preprocessing script to 3.0.
    for config_file in [
        "v1/32k.json",
        "v1/40k.json",
        "v1/48k.json",
        "v2/48k.json",
        "v2/32k.json",
    ]:
        with open(f"configs/{config_file}", "r") as f:
            strr = f.read().replace("true", "false")
        with open(f"configs/{config_file}", "w") as f:
            f.write(strr)
    with open("infer/modules/train/preprocess.py", "r") as f:
        strr = f.read().replace("3.7", "3.0")
    with open("infer/modules/train/preprocess.py", "w") as f:
        f.write(strr)


class Config:
    def __init__(self):
        self.device = "cuda:0"
        self.is_half = True
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        (
            self.python_cmd,
            self.listen_port,
            self.iscolab,
            self.noparallel,
            self.noautoopen,
            self.dml,
        ) = self.arg_parse()
        self.instead = ""
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    @staticmethod
    def arg_parse() -> tuple:
        exe = sys.executable or "python"
        parser = argparse.ArgumentParser()
        parser.add_argument("--port", type=int, default=7865, help="Listen port")
        parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
        parser.add_argument("--colab", action="store_true", help="Launch in colab")
        parser.add_argument(
            "--noparallel", action="store_true", help="Disable parallel processing"
        )
        parser.add_argument(
            "--noautoopen",
            action="store_true",
            help="Do not open in browser automatically",
        )
        parser.add_argument(
            "--dml",
            action="store_true",
            help="Use DirectML (torch_directml) as the compute backend",
        )
        cmd_opts = parser.parse_args()
        # Fall back to the default port if the requested one is out of range.
        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
        return (
            cmd_opts.pycmd,
            cmd_opts.port,
            cmd_opts.colab,
            cmd_opts.noparallel,
            cmd_opts.noautoopen,
            cmd_opts.dml,
        )

    # MPS is only available in nightly PyTorch (for now) and on macOS 12.3+.
    # Probe it with a real tensor transfer for compatibility.
    @staticmethod
    def has_mps() -> bool:
        if not torch.backends.mps.is_available():
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False

    def device_config(self) -> tuple:
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            # 10-series and 16-series cards (and P40/P10) handle fp16 poorly,
            # so force fp32 on them.
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "P10" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("Found GPU", self.gpu_name, ", forcing fp32")
                self.is_half = False
                use_fp32_config()
            else:
                print("Found GPU", self.gpu_name)
            # Total VRAM in GiB (the +0.4 rounds typical sizes up).
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                # Low-VRAM GPUs: lower the preprocessing constant as above.
                with open("infer/modules/train/preprocess.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("infer/modules/train/preprocess.py", "w") as f:
                    f.write(strr)
        elif self.has_mps():
            print("No supported NVIDIA GPU found")
            self.device = self.instead = "mps"
            self.is_half = False
            use_fp32_config()
        else:
            print("No supported NVIDIA GPU found")
            self.device = self.instead = "cpu"
            self.is_half = False
            use_fp32_config()

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # Settings for ~6 GB of VRAM
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # Settings for ~5 GB of VRAM
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem is not None and self.gpu_mem <= 4:
            # Settings for <= 4 GB of VRAM
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        if self.dml:
            print("use DirectML instead")
            # Swap the bundled onnxruntime package for its DirectML build
            # (the portable Windows runtime ships both side by side).
            if not os.path.exists(
                r"runtime\Lib\site-packages\onnxruntime\capi\DirectML.dll"
            ):
                try:
                    os.rename(
                        r"runtime\Lib\site-packages\onnxruntime",
                        r"runtime\Lib\site-packages\onnxruntime-cuda",
                    )
                except OSError:
                    pass
                try:
                    os.rename(
                        r"runtime\Lib\site-packages\onnxruntime-dml",
                        r"runtime\Lib\site-packages\onnxruntime",
                    )
                except OSError:
                    pass
            if self.device != "cpu":
                import torch_directml

                self.device = torch_directml.device(torch_directml.default_device())
                self.is_half = False
        else:
            if self.instead:
                print(f"use {self.instead} instead")
            # Ensure the CUDA onnxruntime build is the active one again.
            if not os.path.exists(
                r"runtime\Lib\site-packages\onnxruntime\capi\onnxruntime_providers_cuda.dll"
            ):
                try:
                    os.rename(
                        r"runtime\Lib\site-packages\onnxruntime",
                        r"runtime\Lib\site-packages\onnxruntime-dml",
                    )
                except OSError:
                    pass
                try:
                    os.rename(
                        r"runtime\Lib\site-packages\onnxruntime-cuda",
                        r"runtime\Lib\site-packages\onnxruntime",
                    )
                except OSError:
                    pass
        return x_pad, x_query, x_center, x_max
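

# A minimal usage sketch: constructing Config parses CLI flags from sys.argv
# and probes the available hardware, so run it from an entry-point script.
# Note that construction may rewrite the bundled config files when fp32 is
# forced (see use_fp32_config above).
if __name__ == "__main__":
    config = Config()
    print("device:", config.device, "| fp16:", config.is_half, "| cpus:", config.n_cpu)
    print(config.x_pad, config.x_query, config.x_center, config.x_max)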