Mirror of https://github.com/Anjok07/ultimatevocalremovergui.git (synced 2024-12-18 02:16:00 +01:00)
Add files via upload
commit 932ab13040 (parent 01d7195cb3)

UVR.py: 4 changed lines
@@ -78,7 +78,7 @@ def get_execution_time(function, name):
     time_difference = end - start
     print(f'{name} Execution Time: ', time_difference)

-PREVIOUS_PATCH_WIN = 'UVR_Patch_9_25_23_2_1'
+PREVIOUS_PATCH_WIN = 'UVR_Patch_10_6_23_4_27'

 is_dnd_compatible = True
 banner_placement = -2
@@ -3228,7 +3228,7 @@ class MainWindow(TkinterDnD.Tk if is_dnd_compatible else tk.Tk):
         self.app_update_button = ttk.Button(settings_menu_main_Frame, textvariable=self.app_update_button_Text_var, width=SETTINGS_BUT_WIDTH-2, command=lambda:self.pop_up_update_confirmation())
         self.app_update_button.grid(pady=MENU_PADDING_1)

-        self.app_update_status_Label = tk.Label(settings_menu_main_Frame, textvariable=self.app_update_status_Text_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_4}"), width=UPDATE_LABEL_WIDTH, justify="center", relief="ridge", fg="#13849f")
+        self.app_update_status_Label = tk.Label(settings_menu_main_Frame, textvariable=self.app_update_status_Text_var, padx=3, pady=3, font=(MAIN_FONT_NAME, f"{FONT_SIZE_4}"), width=UPDATE_LABEL_WIDTH, justify="center", relief="ridge", fg="#13849f")
         self.app_update_status_Label.grid(pady=20)

         donate_Button = ttk.Button(settings_menu_main_Frame, image=self.donate_img, command=lambda:webbrowser.open_new_tab(DONATE_LINK_BMAC))
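Both UVR.py hunks are housekeeping: the PREVIOUS_PATCH_WIN constant is bumped to the new patch name, and the update-status label gains internal padding (padx=3, pady=3) so its text no longer sits against the ridge border. For reference, a minimal standalone Tkinter sketch (the widget and variable names are illustrative, not taken from UVR) showing how a label's internal padx/pady differs from the external pady passed to grid():

import tkinter as tk

root = tk.Tk()
status_var = tk.StringVar(value="UVR Version Current")

# padx/pady add space *inside* the label, between the text and its ridge border.
status_label = tk.Label(root, textvariable=status_var, padx=3, pady=3,
                        relief="ridge", fg="#13849f")

# pady here adds space *outside* the widget, between it and its neighbors.
status_label.grid(pady=20)

root.mainloop()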

separate.py: 37 changed lines
@@ -117,7 +117,7 @@ class SeperateAttributes:
         self.main_model_primary = main_model_primary
         self.ensemble_primary_stem = model_data.ensemble_primary_stem
         self.is_multi_stem_ensemble = model_data.is_multi_stem_ensemble
-        self.is_gpu = False
+        self.is_mps = False
         self.is_deverb = True
         self.DENOISER_MODEL = model_data.DENOISER_MODEL
         self.DEVERBER_MODEL = model_data.DEVERBER_MODEL
@@ -137,6 +137,8 @@ class SeperateAttributes:
         self.stem_path_init = os.path.join(self.export_path, f'{self.audio_file_base}_({self.secondary_stem}).wav')
         self.deverb_vocal_opt = model_data.deverb_vocal_opt
         self.is_save_vocal_only = model_data.is_save_vocal_only
+        self.device = 'cpu'
+        self.run_type = ['CPUExecutionProvider']

         if self.is_inst_only_voc_splitter or self.is_sec_bv_rebalance:
             self.is_primary_stem_only = False
@@ -145,6 +147,14 @@ class SeperateAttributes:
         if main_model_primary and self.is_multi_stem_ensemble:
             self.primary_stem, self.secondary_stem = main_model_primary, secondary_stem(main_model_primary)

+        if self.is_gpu_conversion >= 0:
+            if OPERATING_SYSTEM == 'Darwin' and torch.backends.mps.is_available():
+                self.device = 'mps'
+                self.is_mps = True
+            elif torch.cuda.is_available():
+                self.device = 'cuda:0'
+                self.run_type = ['CUDAExecutionProvider']
+
         if model_data.process_method == MDX_ARCH_TYPE:
             self.is_mdx_ckpt = model_data.is_mdx_ckpt
             self.primary_model_name, self.primary_sources = self.cached_source_callback(MDX_ARCH_TYPE, model_name=self.model_basename)
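These two hunks are the core of the commit: SeperateAttributes now starts from CPU defaults (self.device = 'cpu', self.run_type = ['CPUExecutionProvider']) and promotes them once, in one place, to MPS on Apple Silicon or to CUDA elsewhere, rather than each architecture branch picking a device on its own. A minimal sketch of the same selection order, written as a standalone helper (select_device and its return shape are illustrative, not UVR code):

import platform
import torch

def select_device(allow_gpu: bool = True):
    """Return (device string, ONNX Runtime provider list, is_mps flag)."""
    device, providers, is_mps = 'cpu', ['CPUExecutionProvider'], False
    if allow_gpu:
        # Prefer Apple's Metal backend on macOS, then CUDA, else stay on CPU.
        if platform.system() == 'Darwin' and torch.backends.mps.is_available():
            device, is_mps = 'mps', True
        elif torch.cuda.is_available():
            device, providers = 'cuda:0', ['CUDAExecutionProvider']
    return device, providers, is_mps

device, run_type, is_mps = select_device()
print(device, run_type, is_mps)  # e.g. 'cpu' ['CPUExecutionProvider'] False

Note that when MPS is selected the ONNX Runtime provider list deliberately stays at CPUExecutionProvider; only the PyTorch side of the pipeline moves to the Metal backend.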
@@ -170,13 +180,6 @@ class SeperateAttributes:
             self.dim_c = 4
             self.hop = 1024

-            if self.is_gpu_conversion >= 0 and torch.cuda.is_available():
-                self.is_gpu = True
-                self.device, self.run_type = torch.device('cuda:0'), ['CUDAExecutionProvider']
-            else:
-                self.is_gpu = False
-                self.device, self.run_type = torch.device('cpu'), ['CPUExecutionProvider']
-
         if model_data.process_method == DEMUCS_ARCH_TYPE:
             self.demucs_stems = model_data.demucs_stems if not main_process_method in [MDX_ARCH_TYPE, VR_ARCH_TYPE] else None
             self.secondary_model_4_stem = model_data.secondary_model_4_stem
@@ -189,6 +192,7 @@ class SeperateAttributes:
             self.is_demucs_combine_stems = model_data.is_demucs_combine_stems
             self.demucs_stem_count = model_data.demucs_stem_count
             self.pre_proc_model = model_data.pre_proc_model
+            self.device = 'cpu' if self.is_mps and not self.demucs_version == DEMUCS_V4 else self.device

             self.primary_stem = model_data.ensemble_primary_stem if process_data['is_ensemble_master'] else model_data.primary_stem
             self.secondary_stem = model_data.ensemble_secondary_stem if process_data['is_ensemble_master'] else model_data.secondary_stem
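The one Demucs-specific addition forces pre-v4 Demucs models back onto the CPU even when MPS was selected, leaving only the Demucs v4 path to run on the Metal backend. A hedged restatement of that guard as a plain function (the function itself is illustrative; DEMUCS_V4 and the attribute names come from UVR, while the 'v3'/'v4' values below are placeholders for UVR's actual version constants):

def demucs_device(device: str, is_mps: bool, demucs_version: str, DEMUCS_V4: str = 'v4') -> str:
    # Older Demucs versions stay on CPU when the selected device is MPS;
    # CUDA and CPU selections pass through unchanged.
    return 'cpu' if is_mps and demucs_version != DEMUCS_V4 else device

print(demucs_device('mps', True, 'v3'))      # -> 'cpu'
print(demucs_device('mps', True, 'v4'))      # -> 'mps'
print(demucs_device('cuda:0', False, 'v3'))  # -> 'cuda:0'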
@@ -218,7 +222,6 @@ class SeperateAttributes:
             self.primary_model_name, self.primary_sources = self.cached_source_callback(DEMUCS_ARCH_TYPE, model_name=self.model_basename)

         if model_data.process_method == VR_ARCH_TYPE:
-            self.device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
             self.check_label_secondary_stem_runs()
             self.primary_model_name, self.primary_sources = self.cached_source_callback(VR_ARCH_TYPE, model_name=self.model_basename)
             self.mp = model_data.vr_model_param
@@ -446,7 +449,7 @@ class SeperateMDX(SeperateAttributes):
             separator = MdxnetSet.ConvTDFNet(**model_params)
             self.model_run = separator.load_from_checkpoint(self.model_path).to(self.device).eval()
         else:
-            if self.mdx_segment_size == self.dim_t:
+            if self.mdx_segment_size == self.dim_t and not self.is_mps:
                 ort_ = ort.InferenceSession(self.model_path, providers=self.run_type)
                 self.model_run = lambda spek:ort_.run(None, {'input': spek.cpu().numpy()})[0]
             else:
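The extra `and not self.is_mps` keeps MDX-Net off the ONNX Runtime session on Apple Silicon, where no MPS execution provider is available; the else branch, which loads the model as a PyTorch module instead, is taken so inference can run on the MPS device. A sketch of that dispatch, assuming a caller-supplied to_torch callable that stands in for UVR's ONNX-to-PyTorch fallback (that fallback sits outside this hunk):

import onnxruntime as ort

def build_model_run(model_path, segment_size, dim_t, is_mps, run_type, device, to_torch):
    """Pick the inference callable the way SeperateMDX does after this change.
    `to_torch` is a stand-in for the conversion path and is an assumption."""
    if segment_size == dim_t and not is_mps:
        # Default segment size and no MPS: run the ONNX graph with onnxruntime.
        session = ort.InferenceSession(model_path, providers=run_type)
        return lambda spek: session.run(None, {'input': spek.cpu().numpy()})[0]
    # On MPS (or with a non-default segment size), use a PyTorch module so the
    # spectrogram tensors can stay on `device`.
    return to_torch(model_path).to(device).eval()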
@@ -786,11 +789,6 @@ class SeperateDemucs(SeperateAttributes):
             mix = prepare_mix(self.audio_file)

             if is_no_cache:
-                if self.is_gpu_conversion >= 0:
-                    self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-                else:
-                    self.device = torch.device('cpu')
-
                 if self.demucs_version == DEMUCS_V1:
                     if str(self.model_path).endswith(".gz"):
                         self.model_path = gzip.open(self.model_path, "rb")
@@ -1002,13 +1000,8 @@ class SeperateVR(SeperateAttributes):
             self.load_cached_sources()
         else:
             self.start_inference_console_write()
-            if self.is_gpu_conversion >= 0:
-                if OPERATING_SYSTEM == 'Darwin':
-                    device = torch.device('mps' if torch.backends.mps.is_available() else 'cpu')
-                else:
-                    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-            else:
-                device = torch.device('cpu')
+            device = self.device

             nn_arch_sizes = [
                 31191, # default
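With the per-architecture device code removed from SeperateAttributes, SeperateDemucs, and SeperateVR, each worker now simply consumes the self.device chosen once in the base class. A minimal sketch of that consumer pattern (the Worker class and infer method are illustrative, not UVR code):

import torch

class Worker:
    def __init__(self, device: str = 'cpu'):
        # Device policy is decided once, upstream; it is only consumed here.
        self.device = device

    def infer(self, model: torch.nn.Module, batch: torch.Tensor) -> torch.Tensor:
        model = model.to(self.device).eval()
        with torch.no_grad():
            return model(batch.to(self.device)).cpu()

# The same worker runs unchanged on 'cpu', 'cuda:0', or 'mps'.
out = Worker('cpu').infer(torch.nn.Identity(), torch.randn(2, 4))

The design point is that backend policy now lives in exactly one place, so adding MPS support did not require touching every separator's inference loop.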