From e9dd11bddb279709c7d9b66d94d97fe0c533ceca Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 6 Oct 2023 17:14:33 +0800
Subject: [PATCH] chore(sync): merge dev into main (#1379)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Optimize latency (#1259)

* add attribute: configs/config.py
  Optimize latency: tools/rvc_for_realtime.py
* new file: assets/Synthesizer_inputs.pth
* fix: configs/config.py
  fix: tools/rvc_for_realtime.py
* fix bug: infer/lib/infer_pack/models.py
* new file: assets/hubert_inputs.pth
  new file: assets/rmvpe_inputs.pth
  modified: configs/config.py
  new features: infer/lib/rmvpe.py
  new features: tools/jit_export/__init__.py
  new features: tools/jit_export/get_hubert.py
  new features: tools/jit_export/get_rmvpe.py
  new features: tools/jit_export/get_synthesizer.py
  optimize: tools/rvc_for_realtime.py
* optimize: tools/jit_export/get_synthesizer.py
  fix bug: tools/jit_export/__init__.py
* Fixed a bug caused by using half on the CPU: infer/lib/rmvpe.py
  Fixed a bug caused by using half on the CPU: tools/jit_export/__init__.py
  Fixed CIRCULAR IMPORT: tools/jit_export/get_rmvpe.py
  Fixed CIRCULAR IMPORT: tools/jit_export/get_synthesizer.py
  Fixed a bug caused by using half on the CPU: tools/rvc_for_realtime.py
* Remove useless code: infer/lib/rmvpe.py
* Delete gui_v1 copy.py
* Delete .vscode/launch.json
* Delete jit_export_test.py
* Delete tools/rvc_for_realtime copy.py
* Delete configs/config.json
* Delete .gitignore
* Fix exceptions caused by switching inference devices: infer/lib/rmvpe.py
  Fix exceptions caused by switching inference devices: tools/jit_export/__init__.py
  Fix exceptions caused by switching inference devices: tools/rvc_for_realtime.py
* restore
* replace (you can undo this commit)
* remove debug_print

---------

Co-authored-by: Ftps

* Fixed some bugs when exporting ONNX model (#1254)

* fix import (#1280)

* fix import
* lint

* 🎨 Sync locale (#1242)

Co-authored-by: github-actions[bot]

* Fix jit load and import issue (#1282)

* fix jit model loading: infer/lib/rmvpe.py
* modified: assets/hubert/.gitignore
  move file: assets/hubert_inputs.pth -> assets/hubert/hubert_inputs.pth
  modified: assets/rmvpe/.gitignore
  move file: assets/rmvpe_inputs.pth -> assets/rmvpe/rmvpe_inputs.pth
  fix import: gui_v1.py

* feat(workflow): trigger on dev
* feat(workflow): add close-pr on non-dev branch

* Add input wav and delay time monitor for real-time gui (#1293)

* feat(workflow): trigger on dev
* feat(workflow): add close-pr on non-dev branch
* 🎨 Sync locale (#1289)

Co-authored-by: github-actions[bot]

* feat: edit PR template
* add input wav and delay time monitor

---------

Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot]
Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>

* Optimize latency using scripted jit (#1291)

* feat(workflow): trigger on dev
* feat(workflow): add close-pr on non-dev branch
* 🎨 Sync locale (#1289)

Co-authored-by: github-actions[bot]

* feat: edit PR template
* Optimize-latency-using-scripted: configs/config.py
  Optimize-latency-using-scripted: infer/lib/infer_pack/attentions.py
  Optimize-latency-using-scripted: infer/lib/infer_pack/commons.py
  Optimize-latency-using-scripted: infer/lib/infer_pack/models.py
  Optimize-latency-using-scripted: infer/lib/infer_pack/modules.py
  Optimize-latency-using-scripted: infer/lib/jit/__init__.py
  Optimize-latency-using-scripted: infer/lib/jit/get_hubert.py
  Optimize-latency-using-scripted: infer/lib/jit/get_rmvpe.py
  Optimize-latency-using-scripted: infer/lib/jit/get_synthesizer.py
  Optimize-latency-using-scripted: infer/lib/rmvpe.py
  Optimize-latency-using-scripted: tools/rvc_for_realtime.py
* modified: infer/lib/infer_pack/models.py
* fix some bug: configs/config.py
  fix some bug: infer/lib/infer_pack/models.py
  fix some bug: infer/lib/rmvpe.py
* Fixed abnormal reference of logger in multiprocessing: infer/modules/train/train.py

---------

Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot]

* Format code (#1298)

Co-authored-by: github-actions[bot]

* 🎨 Sync locale (#1299)

Co-authored-by: github-actions[bot]

* feat: optimize actions
* feat(workflow): add sync dev
* feat: optimize actions
* feat: optimize actions
* feat: optimize actions
* feat: optimize actions
* feat: add jit options (#1303)
  Delete useless code: infer/lib/jit/get_synthesizer.py
  Optimized code: tools/rvc_for_realtime.py

* Code refactor + re-design inference ui (#1304)

* Code refactor + re-design inference ui
* Fix tabname
* i18n jp

---------

Co-authored-by: Ftps

* feat: optimize actions
* feat: optimize actions
* Update README & en_US locale file (#1309)
* critical: some bug fixes (#1322)
* JIT acceleration switch does not support hot update
* fix padding bug of rmvpe in torch-directml
* fix padding bug of rmvpe in torch-directml
* Fix STFT under torch_directml (#1330)
* chore(format): run black on dev (#1318)

Co-authored-by: github-actions[bot]

* chore(i18n): sync locale on dev (#1317)

Co-authored-by: github-actions[bot]

* feat: allow for tta to be passed to uvr (#1361)
* chore(format): run black on dev (#1373)

Co-authored-by: github-actions[bot]

* Added script to automatically download all needed models at install (#1366)

* Delete modules.py
* Add files via upload
* Add files via upload
* Add files via upload
* Add files via upload

* chore(i18n): sync locale on dev (#1377)

Co-authored-by: github-actions[bot]

* chore(format): run black on dev (#1376)

Co-authored-by: github-actions[bot]

* Update IPEX library (#1362)

* Update IPEX library
* Update ipex index

* chore(format): run black on dev (#1378)

Co-authored-by: github-actions[bot]

---------

Co-authored-by: Chengjia Jiang <46401978+ChasonJiang@users.noreply.github.com>
Co-authored-by: Ftps
Co-authored-by: shizuku_nia <102004222+ShizukuNia@users.noreply.github.com>
Co-authored-by: Ftps <63702646+Tps-F@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot]
Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com>
Co-authored-by: yxlllc <33565655+yxlllc@users.noreply.github.com>
Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Co-authored-by: Blaise <133521603+blaise-tk@users.noreply.github.com>
Co-authored-by: Rice Cake
Co-authored-by: AWAS666 <33494149+AWAS666@users.noreply.github.com>
Co-authored-by: Dmitry
Co-authored-by: Disty0 <47277141+Disty0@users.noreply.github.com>
---
 README.md | 35 ++-
 assets/Synthesizer_inputs.pth | Bin 0 -> 122495 bytes
 assets/hubert/.gitignore | 1 +
 assets/hubert/hubert_inputs.pth | Bin 0 -> 169434 bytes
 assets/rmvpe/.gitignore | 1 +
 assets/rmvpe/rmvpe_inputs.pth | Bin 0 -> 33527 bytes
 configs/config.json | 16 +-
 configs/config.py | 13 +-
 docs/en/README.en.md | 7 +-
 gui_v1.py | 209 ++++++++------
 i18n/locale/en_US.json | 12 +-
 i18n/locale/es_ES.json | 2 +
 i18n/locale/fr_FR.json | 2 +
 i18n/locale/it_IT.json | 2 +
 i18n/locale/ja_JP.json | 2 +
 i18n/locale/ru_RU.json | 2 +
 i18n/locale/tr_TR.json | 2 +
 i18n/locale/zh_CN.json | 2 +
 i18n/locale/zh_HK.json | 2 +
 i18n/locale/zh_SG.json | 2 +
 i18n/locale/zh_TW.json | 2 +
 infer-web.py | 417 +++++++++++++++-------------
 infer/lib/infer_pack/attentions.py | 124 ++++++---
 infer/lib/infer_pack/commons.py | 23 +-
 infer/lib/infer_pack/models.py | 410 +++++++++++++++++++++------
 infer/lib/infer_pack/models_onnx.py | 7 +-
 infer/lib/infer_pack/modules.py | 152 ++++++++--
 infer/lib/jit/__init__.py | 163 +++++++++++
 infer/lib/jit/get_hubert.py | 342 +++++++++++++++++++++++
 infer/lib/jit/get_rmvpe.py | 12 +
 infer/lib/jit/get_synthesizer.py | 37 +++
 infer/lib/rmvpe.py | 307 +++++++++-----------
 infer/modules/ipex/__init__.py | 15 +-
 infer/modules/ipex/attention.py | 137 +++++---
 infer/modules/train/train.py | 19 +-
 infer/modules/uvr5/preprocess.py | 8 +-
 infer/modules/vc/modules.py | 10 +-
 modules.py | 307 --------------------
 requirements-ipex.txt | 2 +-
 tools/download_models.py | 79 ++++++
 tools/rvc_for_realtime.py | 166 +++++++----
 tools/torchgate/torchgate.py | 83 ++++--
 42 files changed, 2014 insertions(+), 1120 deletions(-)
 create mode 100644 assets/Synthesizer_inputs.pth
 create mode 100644 assets/hubert/hubert_inputs.pth
 create mode 100644 assets/rmvpe/rmvpe_inputs.pth
 create mode 100644 infer/lib/jit/__init__.py
 create mode 100644 infer/lib/jit/get_hubert.py
 create mode 100644 infer/lib/jit/get_rmvpe.py
 create mode 100644 infer/lib/jit/get_synthesizer.py
 delete mode 100644 modules.py
 create mode 100644 tools/download_models.py

diff --git a/README.md b/README.md
index f221877..5385a04 100644
--- a/README.md
+++ b/README.md
@@ -68,12 +68,16 @@ poetry install
 你也可以通过 pip 来安装依赖:
 ```bash
 N卡:
-
-pip install -r requirements.txt
+ pip install -r requirements.txt

 A卡/I卡:
-pip install -r requirements-dml.txt
+ pip install -r requirements-dml.txt
+A卡Rocm(Linux):
+ pip install -r requirements-amd.txt
+
+I卡IPEX(Linux):
+ pip install -r requirements-ipex.txt
 ```

 ------

@@ -122,11 +126,34 @@ https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/rmvpe.pt
 ```bash
 python infer-web.py
 ```
-
 如果你正在使用Windows 或 macOS，你可以直接下载并解压`RVC-beta.7z`，前者可以运行`go-web.bat`以启动WebUI，后者则运行命令`sh ./run.sh`以启动WebUI。

+对于需要使用IPEX技术的I卡用户，请先在终端执行`source /opt/intel/oneapi/setvars.sh`(仅Linux)。
+
 仓库内还有一份`小白简易教程.doc`以供参考。

+## AMD显卡Rocm相关(仅Linux)
+如果你想基于AMD的Rocm技术在Linux系统上运行RVC，请先在[这里](https://rocm.docs.amd.com/en/latest/deploy/linux/os-native/install.html)安装所需的驱动。
+
+若你使用的是Arch Linux，可以使用pacman来安装所需驱动:
+````
+pacman -S rocm-hip-sdk rocm-opencl-sdk
+````
+对于某些型号的显卡，你可能需要额外配置如下的环境变量(如:RX6700XT):
+````
+export ROCM_PATH=/opt/rocm
+export HSA_OVERRIDE_GFX_VERSION=10.3.0
+````
+同时确保你的当前用户处于`render`与`video`用户组内:
+````
+sudo usermod -aG render $USERNAME
+sudo usermod -aG video $USERNAME
+````
+之后运行WebUI:
+```bash
+python infer-web.py
+```
+
 ## 参考项目
 + [ContentVec](https://github.com/auspicious3000/contentvec/)
 + [VITS](https://github.com/jaywalnut310/vits)

diff --git a/assets/Synthesizer_inputs.pth b/assets/Synthesizer_inputs.pth
new file mode 100644
index 0000000000000000000000000000000000000000..faa509ef7123ae84237f8a59105949014d56bb26
GIT binary patch
literal 122495
zh&TH-`?dn^huE`@h6Z>ZpdauXWFtHW^enIkOy6D$%t7b}Rsi;TuY%veeee)G0p&`{ zm9zoe^rPfe(g3^-y~WglyVCrE=F)_!<|X$+_auK{w@|mxdt}}4zMy9tOKyn!N9w{? z_*eMpUn}#M`PuVa2sQ*ZNE!rs%Xpo~qL*D3C=1XViE>Ebu>Y{1ee}tIzLrlzpJLKj z)e!H?Kfrp&dPgV_lDuMbOXij^6R0mT;ua$lY#g%cKlgtw%}l{jUM3U^#h*Gqb<$6r zqs&o0DR@%AHMBlv5ykpr{jYqlBroO;=MHCIXqk8|)1%U(ILGFk?VPO)ezYN&GqEqQ zPs-os*<)zQP$_e#1i#*g-ebyjR4Ab&p`?6i`O<$%{*iKVu4Dh*sH9QJNzkvfUuh=T z29|?)U=s*{gWxC#0si@7&==GOW(Y5pfc<#(+W$okP5>+gd|ti>g@9+JD}cRyem-wO zy-+=A9hYM+TLid|tjGP}I%tBK%zgiZ&p7oGYk&vvXPE=o$LHU}tc@6O6|4=d#TCDY zWS;HP_tG2M0`{qEhH8e6f{2oc55|15do@5}GY>m}Dqo&~xA_HQ1l9;!BB z4gmM}6Z8}G%n;!|kv)sZlt`&pLNUM*z`7;(NVUOg(`u=vSl(FPcvpW{e?oUcH&r`T zyHVUIo`E0k1LX%wj0H$)k=-JD2IgCiF^@4HC^}Fix5};0Y|m_bhjhfOsvDTulx@$p z=h$*=)4(R=lrv9k2Iy<=Yu{0{qX_ojobX-YyEtp9DdxPu>SAf0(>!Nk^1|dtibsm) z$akC+Gbv^eWqHlr+W_da%svmCG!*MzvlOU#c{k;cO`M=~Zb}e?XPswZfrSnVYJI6Z*XM_zz=0%s?t!!8vg4@t(}C}czAs|Ui)Um} zhqef2P_qtr&w0<;Ti#p#PXA8N@00te5v~!grQW69d$xPFW%2Kv6A(RC3t~jqa zbzlv2fvGrLQKL)USW#S2oYJXuS}{|YdN%sQc`aLFUY%T!3(J(tl!M_Vqc7-%|AoKU zRqRsvRsMCDna+2~=Ov$){2ux}#GJ0wP-IT{Z6=(rol)ONy*=bBE+{bbyR&p~`Q#(qQ0+gjo&WFy2_{##$ zfq%duunB!P)?27$UI|#w`35kHZ%$xNU>ZD~tjjD1=?Jv;fv2Dsv_$p5gwO=2(|-)j z2K;X=08NpR<;Sl^^ep;+^}qGL^^ORQ2+i`(@~;+Gi?jW+k$m}=pVxJ6Xl^J2`Egww zT^#Z7KW+|f4*eYbIoJ$dE9Rxd+vDw=i+){wUCsI;uVpuTH~W^@EwQJBQ<9$Y--Le? zR^+V6;e8r31Px{ITCq=X9ne>(0|CJE=OcInha(RduL4ESojrHX%bl0oIJt4M1(`+P zX}^rIUwHFc2aK$%|G$1H_fU@0c-ac&^@Agr%*AHtkl zYR>sQJqD4;yX9H$Z@^wAXa8oqvZZVl)~u){>^L6ueEQDRaY(M7G?#pF5mR@v=(PCtOJ|;CL^{(lz zRF7G2T5m$D&cJhCq&8C98o=T#)w${I;(3t!DI3_K-Jy-s#p#*=>KgWgR=QT`wrA^_ z>zhki+3c152!h}msA#BYpeBLqC2J?tlYFWDQp>;do_J5>dS(S&M^0!?VDe?MCQ(cj zsc~Ta8cUgY1#{0iA7PugP3)`gtL~}nsXVGWs$zx+O7hxxun75$T~%FGoC(uZ)l`+G z$@eGPv5ZJ2k+d(h-F`LJcN^1T~o4YSU5&2`aZIs;nm zM&3r=ORh^&_C$;=#zxJzU>EG%SGjyHA7{WG02HNNKo01O(2LNZl0niw_D<-Ir13nA zT*>}`=YU6{N1=Ho^Gae%V@nf3Ily&|n=B{T3m$bnXT zR4*wfaDsP&_dK9Z_6DGap*mpypaI~sOg}rlJ=8w`fPCOpo>iWA$e`mh#CqOkvHVI%dT2toyJ3Un!%a9Oiev^S$#agUVprP}`6U&&xd-f$X1@yp%j%Q?B8BH+%=}F7>3%ea(IUc>lrFl&;<(m?7QY z+21+5czE#+>kcdTfMuBD!?W@So)0{{=e7gZHLidcpn|UgTv81r9~HBd7lW1HJ1`Lp z1-(H}z|Wr=@tT0^-&Vl+;4MK8avzu}bJ%@Y@)^*xP7TvQ=RoIi$8jVb^l~JC<$!gU zXYeypYi7f*qktOJ8qONdmEan90a$CJC+epArkiVXGI-^A)$36`*UUCy4m~iN%SP%zZ1-lt{B(9wa?Ty6L>>d<(cHv(Cnz z?rZmJ_cGTqmkGt@hu|FYAnJMRd2cb}Q#dQ|%+Scxh+76p(}tL>l)+#gm;8 z?MbP_qfSGs*Xrr>X^1&i+#l1&p9yUd=WeY+&J$YB$|b5LsuSuH>aD_7p`qANoDQ~t z{onJTkZC^%3Qb6xjsVYY zQN}2#k5gY;Upou=_|&KU3fKo43J=t0U~ZeaZTL>(8G!F0ugPno*J?!ah-A(ZOVTIl zIs3VVw}qGQih5!_u{60fxn*3-IO_lTT&{MncC*IEcRuT1eY}0V88+a{@Qn_P4rKYV zB!4~UFR-px9@IhRYF*?evLDK4!00eKn8#&C&Medwl}%3S!L>>x>*q%4nq z*(A^i6etUnGm>XWwb@njRq_qU#$b;95MWhWl{v{d$+hLRhUBbHr_OWVWR@z(Pt*|L+ zQ_?-_JuBBqY7fd1$`YvE-LKrQyi;_ih`KrIQV)U(n9-eW&Nh1-9tY1X)IeSVGQZ5v zwdc6&xa$+|CtiNuwf(gvO*%cE{H&=rvjQ6ilKAjzg?ELQbxy8fZ=7$O7hM-!dz^cu z^JPVAMeD5SSz5uR+D_{?Rb)sY%7!LY?9$*L<0Qlc)3Mv4;&%;49;J=>=oeKQ~ zJ_3*jgyR0=cF;Lak;>LX+k z!~P)U(vCHcHCHL9Qt&?ReVi&;m7FchmYoyNiS+1ko!lYr5C_2BM_*MPbshB>VT{lM znIn;yyS^rQO)_;7tnW^=Otdi9eyVP&t{)skqF*A-x?+#k zCA!2vlz%88aaMeSd8W)8O;MyMIDZ&zSUIy&n=6|uhpC3CMk8}I7PG)|kbf7Wi_z7_ zTrKvUKQ(-cY3NG~oxy(netlh#fqV(rLbb4=ip_=QLXomac`o@}a=JWSz6@FJ%$wS* z*sS0h{JHvbHN6+i46TTHn;DpSG*muR-cr$0u~)fQ$?xg9;<}TjN7w!%kP%oCa+CiJ9|6(efNELm?zA`4CPN8 zpEw?wADM@Px{kUI<}tkmu-19FzbPp$DIRDZXpXc;+F4_N=XvKzL2eY!?L1epx5D#F zBjmR4L#{Gk0CO=XssrW#pK_jZa=*g84|Pcy!Hgh%N82!Gp{~C!c1*MVm!MaF<9Xwu z_VkVajsGRs8{8|+m*TaQAs=E&^pxnz@WpqE?-c(4U_@XMbxpSvw-l?yRZ`~N zB`w$iJYXSUeepf!jMc#Wppk$&K^N!^jv=djC};|J59y24TS-P&jF5QnqbVGEzHM=!`V@?^(0hgdZE(gB_d#dGk zmEXmDtA>E}w&mNGZ+|De6J`MlIhoN?Q(044%~s8}4YLhWff@b^o}uRI=ju71i?e4~ 
zCt{C(v~9F4J1;x0pQ4`vrFJ>bLUjx3N_!u?esVdJoaZ<#awAwTYl3+^tdBM{H8c$~ z3^Q1DR^1F}pXVWCMK9CKAZ?19wPn_pv$~fc1F1 zEr z-ZQ1Al$*iNf?rR)E9-}=QdXtpr{<@A2R=7_Zi+R;8q`L$vAwCisSA7%15yXTF%g?e zZNW|bP3beBCXD*|6PS6#eGD~~tD#w^UWJnT9+EX8Nu3uJbaV@saP5?@ZuKpb*sc*7a_%Z?M;N*L0WnmX~TNe>?tm zxQbjwkMbVn?X>N*Wx29k+}AQ=p87BaibC|2QV;eDeVTs(y_58m4h8hVj`5B0eUBWw zAet9^*W<5q?ZqrDzCWS>v)gzMm}H-XNsD8o8SN^k3R5T-I0pmPlXrkw_E~n`C|vJ8 zg-^97;KR7avBseRJcs<|{>?oXxrfwN-tgS;v_?kP7S|TnNzX}--Dmf$gx8e)wtL95 z=Q)LaDFOXS?g!Y1`3!KT1J70TZ3g@S{}asOP79?;J$M$y{y;WbO~C!d2;T@F`;B2> zh<%8?2Qt3OkfBLGG&9jJz%$Aj1k)7L6x5-*J#G(cQ1t`#18b0hK|deQWc=)JyKcMa zrQhM(A)R3(p$ju(Fb#FETw^bbmqpfb>LO2djdl&HDlN6wG}kmA=s(a?Tfr>4yZ*cW zAbvGN?$qVLvYs5asB3ce+aXMpW2?<+F@qVRm@__DaCtFD3?x1I1e`TP!Ve zTjoaZjNaKNsZWv?0WN$7nDN#eG>C5ykL_RLTjg8jKyjeB33ITyzNRWul{v`N`W^7B z$+HZs62hB=Hwodoa9t014|yTjlC&jBqtqy;+NMhLxw-DP0Mn4)$jpgq>T2q7N#l~} zxxb9;y&TMF+)})ycrj*c^IC3+*%H$`uXo;6>s9Md-k-cpF-t-Tcppr5PIj^n)X?0} z%-KdwJxx6pm&HYG!y~|Oq;Sk$U?%8S(EM?~yVkSTGr~Q>-PP6A)fX8A3WvgxWzWJi zg{zX!eSmX-b2?yl$Xmx-DSLn#hbFEju0eo3y0w7%7<%H50?ugQe_IdH*E7J364qW& zLPT2RGTb$f{2ee8rDCw6bT6@XO#|Re zVvVZ?SmIyee+=je=iZ!}$e)5g1+)Fxe)bnV;2CmjSdY2|_{^$(YTqZAwe$|~9A4lr z@HfM}DxNjyCD|a~Ag>o$FOo%k=9sSZtn^%STyyko)wk6nVw zvlnI$j~X7;H?wbMcUyNG*Pw04MPq$70-V4MX4WydzH(pPb4|}Ryq05g#^&^~_px)v zGS@(U57bYv*URs91A6IvjxU5Rz^Oes#OHnwpx>DLc<$+0@7wO*?$34Sx<9ghWX+As zjf+#nDJrQdsg|mjs-uNyVF)m(OsY+?O|lwAHHwNI#SY$Y^cgZ=bw*%DK!z$E`}h3& z{sR1b+56=`hrnXc1#o@}uNA$_O#yog=Ya*#dzb|lfIfh_DE8r*Q%Ri)b1k_JYyhk| z@mo{pUX`|=I5O0X@nAkBfQW2@hR%ff}#o{@m0_Mj~ z)K0|2X3oT5wi#z?^IB4K@>|hw zMbnhilz*xJQtwaRpIp^Y)xbQKI)Jr~6bK#Zhdu%5FXLV%1h_9b4XDTa9mrB-DXjtZ zh!^0cqsMIn;C_c+$NJ9!5CKndH!v0WzyY%`3htSx)UUv=;1p;Ns100f zT5ReC=)s{E{4}64lGn1Mx}*9R;TJ)zQ|qWBpeLk1=GvBkg>ehxwncA? zrYZ4&^?;T8q;(1F5|-sG%VDpUdXx&T3P^aY>N`fw|IOIKOZ*Tg2sYd6r-fDA#unsDV6IzHhk>tOh(^ z^#TUKy%hIO%nsqcg!{97fbZoMfOYWB$l+=Yn7364R0Q7xW@@rVeI2kC-wauV&B1d} zFHjE&2G5XG`5-XWH`R9p9v9YysTmXlVxS6iV6OwO1FXGp-d=NWbMJKDbYB&CL%44D z0YPuj%i3m{x6FGAdaYRGty1U0KRfC?>QuYbu4VAaum-{X5x-_+U}RuJ)P|@yPn?Hm z%a5HOJ9*B04=qw1M;!>ubK_yeqddhT4dOkUalzV&5;bwjF2B23wPn)M5qZy;2-jO(9KWm_fOK=H)Y!QzL3b>JxVAcIx;8Rg0CRzUJl4%cCCV8Rlhg&~DIj-p+jee0@(>Pgh1FkfxaPPxpJT9hu=HFuM!tG=d3E^_;fRm|ciD#I4avt<$5r(d^%OkEP8FsKH&r)P z>9Ta0J=z}4;u(FFGsGE^#)5h6oss3q^X*>UUR_uCXqG~w{0?YSv?+bS5b!CWrkUR_ zd!jsFW&_C^j`_np%f29*#GAL7q1J+zs|3(3V3ei`v1Sb|BUI3q_eIL&tp4y90Zu-W&(Pp z>78o^#vl`OkA9E-Z^Pe)w$M9G0w)Y73_R!V(CNFm8&z*^cSdP6yLIrCWLCFE)HG~I>nLI+(3-8I18agF!vO!l_dx0!#$)cF*X?x+4#A-=Ru`8qEMNFf z&ObSKH{abnw&mEC_p|P2QNPPx6xZcEcb>ZedP(=ekG>y$oF&V3llnr|gXQ3i?+lV} zKlHIaO`ly)(D6T5FQ#^Y-e-P){4<^fkAPCZnl)=^#emtx+-D=E#k&QtzSi8++%w-j zU-J4*^G=gyNJJp7;cIvT?_j2Hx;@>#8vb(D$d|g8x+}RVxgO^|&fBBfqhh^qs&cAw zPT`zFYR~!dl%8>9<_y#g)P1Y{Ry$cWS(PfLN}14nSDN8*Wqu{ssGI7WYK-<3mM1Sy zo`h^sX5Pmp$0mDiUfcPc^Eo~2J?z{=W+B^+`^CxH$=aQYor<73sLr?MTdP~DTbOGW zg|jz}Abh5IEx)&ZZ_NbFT+LiH12v_rJmw??U?~5~{g-I*#M&#=$17upJKd4O8DpY1=}y$-K~Ipy42Gpn3? 
zG@jqNj%h#4Yfjt5Csd+Y> zADAC_;D6v}-?cNe);B>X!2IGj$b}jVpB;PQ^xv_6%xl@(+uM5@SXFr%(rmQiy0`oABXo-7?oBe9;PrgP@)g~!BDfZ9`j`RQopFWs7e#k1ilLo8V?%Ffjh{9 zT?(H`g`^5eWy&%o=Q(gsf821~z&%2NP#|S1{iFCtF&o|+>PMKz!R#9Y{G>B%Gi;;s zN9A)*O0BqDEmyBmu2J?c>R)upe#yQme^dSj{RX`co4>CMzAm8On^|n^b+aBj0$%J+ zLMNfjQRcXU`4^ip2d7*?xq^ydj&+Vy!|Q@v>KDj-Vx7B+r;3L<2WlnCz-d6g-BaIF zDZ`T*s2!Lc73YhSGU8T1+pY4ceEr@1r5On)FfZBev^$w&%AVT^&k4_X%*Nzi@lQ}c zSYOg9Pe;xebtmjqtpU`=WdUl{_#k)Q(WcydI1L^fU6T%X+|p zz=6PzfPKIZfebVRe_c`~w^fWgFtm#qDZ~zPiF9FwHo`GK@AN?w3 z%S-@$F_VJ(*DIbYp6AZzPOis4Aq#f{7~>q{WFO-+;JVZW&mC%Oe7v~rAxs12QTiL&vQ9PqKD>f_kQqHBEHR2j^u5qrhk-m|heY2kMG#d>@ zgWM!HB>>jTnWgfv@nho_!2JQDL`?Up^MNlKZE(aHN~1@FQJz( zO+8J`z8;^W3ZSy4vc>?qtGlZ=DK{yTF>LWFI0Qz4czwK{zOGVDspcK>AUIF&Cy)iG z{SuOdB>K%7sO8iRvE$_Fyeb#+e>a%7dR~;jy-V1TJ*>~QNNkZf3)ztykh#e-8}D1rm|725I~f3WfN_BOL)OVxfkKc8Sa+EKz5$~F zzy3bv(+&pwyOe-4UFbdGe1dX-_jsl))0S(=wS?t|<nB|S^D;T(o4!_R6`Fx*+G%jYR*(zz{VEc8WRhtP^Hd+HKT@AnomDMUEK{_PX&=M=L#ij$^WOd5J<2u8rFZI`g8+3#Tj7II`jt>u zmh*3i#)(<=8Gr|4)}i<=ynz|;d}p$U&7YAvPh~)fN@Irr>r&K#cJg-ex?OJ975^2< zXWRyvk;9?iWRB^P#3PAu>Nxcz?F*1Oir{K`)DPWBR4iED~?iuV`5 zYl&Bfm433(p3xpXM$V;; z1JiP*TC{^dM}`!0On9DKtY3`N(;hvR zAua0%FSIYT6QK2fWPD`gneP{H5gZ3Rk6i|=&$UQxk-7|=2X)fwr0GF_FbVwdA77?@ znU)N0AR~q~yuVWZO6dz&yXpx_j3q|usw)BR6O=loZh>Zj<|*=)>DORp9(^6PK~Nr) z4^A4Kv`w~6mIf*%R7z+dYalxh>Vrjbi{gg19NIE-OXilch_VR!GSeHUH&)~Tm<=G$ zvmUe^wDq?3wvIH9G^aO8Z&WF!Qp~};gL#oVBX_=*zn1S)?o&1snh9J3s4e5Z_!;(ba=KQf8@?; zU(G`QLjNML6&a=6H*pU|9R~k9?~pUk`XBdCS)r`ZZZHb-WT;_ft!}q>xA(K)XF(Q; z_lNd}8oP)sQdGJ^wLyzCHtJX9jqtcx-uWp^tfVaCFcU@<^&8uBFTySd_OY zPvup4&j-(A<+~*9jrobu+MhLa7fPK zt}s=YIES;Yk2asFKwF?a?L6%qBn}cKfSjgpeBb!8z1iNg?z8SjSEFmAbE7i_k15C| zc|rez9`=pK(WcR+0{B7Ge~;d&r2yAMR{{DUv;v<2)b4BnMgy(pR(bAYE$$F_!`F`#~_!=rD{PQ0ESErtoKvt zEyt{>hXy`0FrW=+ztw-MZ_qSowz{^us>CX>#oyvTSaGmoo_3zLgT8~l{eboZ*z?Cw zDYXV#18T3@3tO9{JawMBi>r%kM_`BSUCko@D&&AtYkfj@LdVS~xpBiB!yKS^?A+$u z=A=m&wwmUsBkE{&Hal5A-0j%y;68A%2Oz zuFzNL>*($1{T7;T_UQRqevUi=)^x}{_Qt%pQjWyR8_FBVjhhGGVP0>CfRzm^Wgp@l z^*ic2HguFVtDn_>R{t%K+>qQ*1@Q4S@Bp9$x&s}6o&c}u3BYtqK*a%5Y^^K!d6o*Z^?fJ3KgC&Vaibxf%i0A*QHT)bEPz zim|r87`YN}gx&~ojNsfq3+M%Kf5v_*>*KtyIQQ|M=e~~lBjf|MSGHGLpe48j9QPmh z4+{(nP(#9XaGh8uE`YbyZOHKZ2;g(P0xmt*Oss1a`-}YxT?<{?0^0)RL&}GA@^$jD z?+GcUnmud2mgGjaB5VFp%cGV?cu7_x4|t$$psh?O6X;dJTyuJAk$=o_G*OeNX+<`` z6=W-Y3s_-Vcc$!2*<9~jFFA)>;IYm68f#{J&;2U<)#T%r_)2_Ehtttf*-<$HIq9r> zhs+`KW#O{mMix8uED_*$K$TD>RGKTzZy~eDX0RD%=w|4qYo}{D)tLY@-~o!Y#oErg z&bl<<3ur;EYOW%wri-Q`qap)saE({%)q>)vy9@e8>Xzt@#3IaQ^=37-bS1hH-7@Vm zEp?T}z%=Ait<$g5pMf827jqZ$Nz+Laz1+$SWd`z#UBE?T;O;;+4toUT{jN2vHN*oX za??MG+8uHU7aA8Dm+6-23epSGGqsso9y{AI+p|)cud_nB6wUM_&)HBm&VKe zOOCHg%uCEZn9-7t$@x1jBQ9gDXRU`>MxC~G+BU9yT=^yVSf)ACoK>nS6<*#!^qr}x zt*KQEQVhBe9ArMHhb<3V>a2BEvAZ|*X4A$wtR5?XeVoa z63vO`T>yREHkdY;HW@d`TxRN2*>^Y&d~N#L^bmYT*b5k|9jhfbzOS*b@dKb#U#c%k zElNG%KH)y+JLl_F1HgO3qxa}9q+Up!=mDVVGQnq)`JykXUsTiQ<&yT2Rr`=Dx zXZdFNY8|x>*wLwvnvR-U4XuVAnjV_cl+u*R-pSrQ>OJa<$laj#3k$Q2$Q@$Oy1k{n zg&f!ALzlx@^^2hsZ4+(m6?1>S8#+7IrFKGhJl8hYHUK*0p75sqJHU146u@XTT6tem z6SWcC#V>)?@JSqH7-i_I?5kuwtSX}_BUziQJ#RQ~c*6LE@eQCSeD_0dm>YU zH6HS#sqI+pUF{v^80Bc)-nu=0K>UF6{^k8ytGKV?zKYpZv#ZY7&)9h_aqREt@91aU z_7UXxE`fIwbD1v!tm&Bm`qz~NOM-7mZ%AhXXP}(x6c{cJ7shB!m+$EN$!{j2I$)g2ykct}ZgN%aoz4(}=7DPL!QXMZIgjqvSt z>0G)_$fCTX=8l?W&SlP%?vw7vl#eO9>$~d@LjOn(|GDkwwo4$mEY&U5ac*b7ki9Do za9xG2U>JWTbhmW3921TS%T3GWH3(l{`n{oLl}WGMLZJ}V!gAqj{nz?t_%(4nW}Wzy z0Z^Y(PtZ=#9y1&>@U^^6xJ~G6?kx8mlby-Vx8Vo<6|$6PYG-Pf0$1QGxFNJ5L;?VL zF6^@|2rUSG1H1*0bCw&<4YMZ9erawj7c0Qoa$R?NY`V;|;eK-_upHO~>;(P_@N=#Y zv9*wSHfNEkNUsF$y*MA`0^ECl3cpw^59OXTYY?1|sPSS=j=kw*AQfPLg`AvsfJT6| 
zi{AqyfXx6oXqN%bW84Eagd61EHF<&00keU7fS%!=Vd^Y$0e*Z7JaBS>rvR=iFpNkO zf)j%GNA8boi*7@jVpFt#tbc4Id{A;jxgmSl9wyhNu)a{9@6*8<;$EOzpj#jm4Miid zi2VGi1t8yrnuLfa;;BVg8NCw5N@JyJ=y=(~&aBO>Mf(Ju?cE{nyH^`l8-@mk2Aaes z@qPFEZq6ww$U5h1NpdhXK=T6g0%%#uJUQ0yx$kF>ybt<7T$|?r_%qPqzAdyrxIf5x z5bO320$iW7t~fI=Gmz)Y^Ch{GTqkNy)Ff<9*t{lbO_Cx-k@8&fbIGP1rXBR|J3RRC z;Kk{S(?_XBsg47u0D6PIqI^Yp4cXP@{mT1+8fzy9$&S!?*6>*inJi6~z5<2?hXr#1 zdgG)6>`UnZW|Xf2x&Ti~Pf8iUMj#`YfwbtYP%#}wLd@DAf5wLZKPQ)g`?1df1K^aL zvgVc=t`)#l;2B`Ov|eJy!%O~`{M@IPg8P{XPI?-^TF_Q_SFUldao-`{A(A`zZ0K3J z*0Ia8%cF9r97h5AB(aV=3Ax0Jf{TJR$Rau9JLIEY?@j4VsmNdCr?25`|7`zQ%~;LI z+L5*6g5!cy!&Aeor9B>dJk}Cw37w0bi`@_00UQ8F;%Q`+^0lO|dvX8b{-D@uzI1%) z_z3yN2gC#71F8p9o6|O@wehs!)>5vA77i{PJXJeY_EMbYp5`9u9qD}x_^0cit^_<5 zz^jUyYx1;w2wiE`Hfvd99i|?prf>W}`#?L#S8^am!E4zObOgzl;yup&9cyAw0M!8R z6W0C6wPX#kAK*fMOZ$gx6eNFfT}5Ki^aPL>*yg z-~#joTYXz)ZOB*tul&45*avWioS~1ZKdOGp@RY%*GwK#qE~?~pDue|jPPYk@gwM4L z_b9E&t;yu+K7fqk=YUD-NowZs94tRr&b=A?sNCn0*Fn8g2S*3TWaQbKpzYzfpoX{N zA@~YBg3Q_PV&BPqrDR94Bd#H?p-p|8daf4=kR`)D0QHa?;SW)U4B6X|&ASQuCs>af zsN3LcNgqA#;}7W$>DX88q3)sfEB(p?=?BtPN|o|(%Hb5F*XR{QK`g5)tK-)`sr;mp z*Ax47{iJ>p=S|jh*$-pyk$n^P!pLjkK8)J#tWXxW39=?r4Wj-~U=Bc?5qo-kUK~R> ze&325fN6ng0sfBHov%BGR}8OMm9#49Q=mDyIhk2g#c9Q9d>@|>pKy20-8FC3zEwNi zIow%PQ&eL}HKbzNRV_|goWlBCKW{(p7|$3Fb>~g0Ce^FjS7i;%oZ30HOVEqz>h9`( zL-~f1{wdV;K^|hHS5BHI&9gXhabk)#MT=6L`D@{8fqmnI%7n@|f1Ljrcw!&idT?tt zG86tw_%DIIHCXAoI!GO)cf@zZqyD3Qdb3iaz<%Ag;ADFv-U!#BtYyb1#V6qs57;8M z$OYd8pAZvb7or!Utj)0(IU+hDI@>oFdLFZ- zdP_BibT9ip_ACj;b(v(NLS z|4DznyWYK_d_#GGsz61Z;rGCosxMW!%3QfVmyn*2em?npvIK&{8rK>Z^K$0<=KI+9 zKJPy7rnl}X_bE5mN+ZG5O$MgGEpDD`o~utnp9EOIZ)Yz3N6^;h19{Fo=ZD%4wMUFc zjMMbf^xUI!&2<^MV>z`swLOs2{9E&H&4cxW^{2#BA~`mH3H~K$h4J%_v>j>5#$=;g zY4%44)sbi^w z_hp^0&bI|9>|5AZ*e+}z<{svDAytW$hm83#-Gc z!^Q9p>l5u0#qH;ftBtFr=Mv{oa?P8-#b<4PIr94GA)#~Xob=k86qpqFJ$(NQ1BC&u zx2VZz^fmgZiv>L~a1b2g4*=Hm*qc6)bRubzev&?v6iVu*>Zbas7d_Q8)iWKy6yY2m z7#?7qgnUHy{8>Zk9qb)sF2V}$3hz{8rr+ba$1^^4d@9c~>)egijnxwa69a#R#-3wm zMW`aQU)nEmFP3M|vu{vsP}S;cbz|Z2NbTS}>pWSTLtY>GSp@+5pOc?b>QD8vUvU_I-2LFk#<^p3cy#z}fF66~Di{HNO#gxbkpXi3 zhrNpS@b^0uIV5}HkyA7a;JxF55!3Cl+hy+;UZ}BfOwKmt^IjNRDA#&f4-z3}oEDk} z6=wesYf5*cro;8;`;qq}oU8v3{6nxGz%}!@*f@D@{w=ud{;)s1JhU7~P7AFE@BK1# zCtRm;T|O;1E!b1+DY6H>F}g9@3%+%o!A<=%@zca*>Sb!`b$e@iYu*2bM{l<3#?*`6#&i0B$U6;Nt0u{JzYIa7IAds&O9;B;malvqkE2dxLK zKAX?>2r_DpSdLi8x!z*iVtYLE@l5*PmH}M*+=fhSa?vkjUXb&4=w-q&_9fj*I_k<< zLm@w~7IMPLzN1irU@ zZ>t0*0G$Ey7fS&4nmPgW&84sUZ-6!cJ=fWA>krV&g@5P0z|+7vfO>$bz<0uT^4IPc z?neiIpRmNT#Ine`$hrwAvJ_c#f)31_V!6I}+$PHz+zNN*?z?WEZj?KtITN@K$dE z`2D|Y_+0~aWbI<@VipuuSw~<$nS0n?0G5X=Hv~ENa!k$-=Z9;c%Qt}6#X5BpFah=V z2+(s^&s{kHYy8WAHNc;N1wbP3Hauzm2vE~L1(*c92s{dO1xo8n>sJB?fRPO&(Mew= z`@%QYH`eQccEC%(y$$!uo;5iQISm8q2gqX&>)KxdZ5!G)>;uGlvHp1Uxa{3Pf6`0Q zOVNJtO#CZ6*0;j(GBuhST?@WB_cO)F(Hv7hroQ{G?z_H+KO^V7RsL0e&I>DoD}wz| zQ(zyST!5EVFRQv4x*49Ye7-WvkYyl8^}*nS!D3CZW`=2oiQG&23zDS7{TF+a?Emcb z?)83*9OoU;9nl`(P*KOl`;uOx5#Tl5YdZFusFirc_K1zOz$|l?nLo2msN-^3U>|l2 zFiDsMWlOs75q!o6q6?O#&(bSRN)v0SldO}hvu(3&)VfXvcs&LI>Py(et_L`F*Z{o5 zwl}SBTG2YNM1+XYXl^u*G>tSdTWv4$)~GFQ%4o_cR2C|gNy;R{PQy;6L+Pk?SG$|N z&EC`g(|-2)=LY5mJ_ZVQ#>1 z7;;z0|KS*W4SMlC(4JB&H$XW+NltD_RY}!+?|eDqb`SDC@_qTf@pa>6U9{j6d=>r* z|A4@Nz&Q6f_xs2PDG8JWo^U_mPC+g=Ib-`A`y3NeCZx=*np=e_#nsK(&3Q(3Mn$c+ z2@emDZ_YPUZxDwpXV!~ZD`$;SiF`ZA(+%(G-qT%CUr}%ByQ%Mo?hoB3;2FtU?Bnp| zX9gTS{K;SOieB*;@>ka)e|O^ciQ8Yxcr7E{kZxe+v8+uHJ`qZhZB2dpULY5FS$hFq zJIs+-1~4z}Ie^zu9Y8)A=NEDzwgCLTB!Jv>;x_;_)(HT0In$BrOb#XW;2gWDaeo3? 
z6-R}m!sq7C&F`4r;a1-SiMS8@-nQO0o}bggX@NQH5ABcIWd8r%q8}+M_S0yimzc}Ywlqv-B z*fzrV{s6Q(*~V-mJyQPR{D+hGWpSW5z?wyGWV>=*QV5g*7m*3de);pE=jFN`Uee&} z@E+*|^hGv8wNx#=9eg|ZHNg4}wUOLYUIfTziUY`FXWfdu)g%5R{uWP*C)=6rRCH5x zlT?!GtmdqytF9|sxjNR9!kVz=^^DguR;X5}N>WQw;}hc(Sr1=oUuyrx`HeHfli^7~ z7Bcs;^b@}i;C@Q*3;s%g9!!v$1kQ?Q#Ug<9C2FL(KG`5`K-H~OqW{V{@f_UF%H_H~ z_a@w%w@3C2@0b1V{qF9b?w&o!tLNhFEc7>Ar}OVVH}Kp*?!WjuTaX2|7yQe2q<19l z`4Kw>+{uaz1)Kx&$7m^vsKYiTzH{Sg0<{7wo25z2#n`hwW8TfT) z0JER``2YT%^R#Y=sL*tT-zMJv(>&e#x_MfklilNA&C|c`EZ_X@n`hwW8Th|A1H8|t z+`Z-ErgjPi-~Q8mo^joMZpg__|5x|N<|Ce6rkj3BK zY+3y#HZBUj{rl(IMUnl(bN%DP3l=`|*u0$VtY7nF|HVnr-@@Pi-qu;;+u}F#EpJpC zzWaF()tbL|;Anh>Z(aExH#~mcQK{$uVa_iPy?$eM>kWr(!4LcS&QnG|MI}=FPa+VUM9KY5H;P?OVz5fm3H8Ds4 literal 0 HcmV?d00001 diff --git a/assets/hubert/.gitignore b/assets/hubert/.gitignore index d6b7ef3..03dfb38 100644 --- a/assets/hubert/.gitignore +++ b/assets/hubert/.gitignore @@ -1,2 +1,3 @@ * !.gitignore +!hubert_inputs.pth \ No newline at end of file diff --git a/assets/hubert/hubert_inputs.pth b/assets/hubert/hubert_inputs.pth new file mode 100644 index 0000000000000000000000000000000000000000..46d28868e5b7107a209af2ac9b75473575308e12 GIT binary patch literal 169434 zcmeIvO>Y}T7zglioW!o%(o$a2LV-(FDu~lS4@DfR2t`5*!UQ3r2+4BnrM7D9tUa#i z0SQsHQcp#qa75zRkHV2p(yzd-leT%OCFqskSmRyq&itRpziA}p`4FnraAcedmC&7R zr=!^H4R$7R+>Gfs);q1(THd+QUtcSSt*xz}=6=k=#m&Wh`*=7RwbQKBj>A#AyWE__ zUVmI`j?(Q(uiwdc55~h$^X6(To4a^%vr_EHSGKOFS$TI$ZM{Ei#jkcB*0TA{Qod#- zTbQP|C!N+uE7{`b*KWtG@?{c|Qhs|ktLEQH>27xHUb{W)_tSRl4F}`e7o$!(N;{wQ z+Od|^-rdNK$Lz#Lc5?GXp1ITNbb5p9&F@>|8`%rpyq)gj7S@Kt{*w+)O*^=po!;NU zi_;FyJni76y$)W!*DVUY60=t~ve%}ChLf1fHT$hw=_q^sK%q|uPygxCw9aaFcC(y! z*WQ_A=aN#)-k8?7lf9Xg?quh8)8C5O+Z)+CljTNIsa|+=|I)1-shEwK2e}lk%{s{X z%KF;J#W6nj$on-25FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009F3D}hE*sXmP5VK_M#irGlY^C48LM-i67)zF=6 zr=!^H4R$7R+>Gfs);q1(s$X7TyArmZXB@5L2>}8G2oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+0D*rm&`2uP<9~kh^R?R{6tj_(=R>Ggk0vaItD!sDPDioX z8|+NtxEa%Ntan z`Oi&!P%Enc{m1pM=km(MY$T=P^2Ff6qx+X`l|m@yx#GovF+*f|=SKf<(8E|BhLd@A zK9>%J{FO(~2&%6fj@f@&2V)wi=4QrRJ{+?j{9w%KUuVax9*)^BclN-adhqO+y|Zd0 zi;M5=tv_FW|H;mUy}QTP?{b=AoOv)kl>BrSE5+|0e%t+Bylx~5i^a2r{8OyoUHcE) CtWhEW literal 0 HcmV?d00001 diff --git a/assets/rmvpe/.gitignore b/assets/rmvpe/.gitignore index d6b7ef3..dbb24a6 100644 --- a/assets/rmvpe/.gitignore +++ b/assets/rmvpe/.gitignore @@ -1,2 +1,3 @@ * !.gitignore +!rmvpe_inputs.pth \ No newline at end of file diff --git a/assets/rmvpe/rmvpe_inputs.pth b/assets/rmvpe/rmvpe_inputs.pth new file mode 100644 index 0000000000000000000000000000000000000000..a4cfb8607af1de19648868e84e8e3d30939d359e GIT binary patch literal 33527 
zcmeH|zi!h&9LLXbni!WqQqvhR773CF7%CX3B9!HXFhxjF7R%UHvFgM=+sTHMuvAP; zbmSfS2#5u#6TCv7fH&Yy2~pCBh2^_^FS^)q?(Dno@B8(~lXhJQ#}Qj?U)&bOXqv}< zGR`MuCkm>dou8)to-2l{HmiM5Vqojpqc~MoSY<`{zU@z{BrRRPh~G_;H1ex>TxNwo zedMaz!NH)ZhnIR}a1twfF~#ktSx~)MTy#}^VCgYkwKHhwV>pR|r(M-}DehIO`C1BT z$(cMlQ;xnOkEC^`ZhQ*EEKTFEO0se3zAB=)h@%%tSh>o*f2?*Zb@N!=D%+lHI+y2H z^9y}n)4hKRuqWk z|MC5gb3ggV{FDD^4>12Y1<`{&5PRYft`@|2X%P|I9!6kM;obk5d3$d-9+B$GM;U zXa325vM(H>y_aSEVoPyUntIQNtP%s=^$_5kybQvh9i@}K<2xu5)J z{>gu|2bh1H0_fV4|KvZ;{p3IMPyVAl!2IJBK-Zr9$MWBkP3Om;^;yic2X*(PT^GV} zwjJ3L&qXns=CPlQ^GVs6#zmQA<9_d%82*2I?3xw){Ce`XrXOj#CoOYWt>NwK3nDZ_Zz8;>kwi!E)eT r`{8Pey*&GS{J#ACOW(#cJ+EnwUp|^|A@u6>wVj4JxAn)2UyS_&{WX(w literal 0 HcmV?d00001 diff --git a/configs/config.json b/configs/config.json index 8e9c176..0861200 100644 --- a/configs/config.json +++ b/configs/config.json @@ -1,15 +1 @@ -{ - "pth_path": "assets/weights/kikiV1.pth", - "index_path": "logs/kikiV1.index", - "sg_input_device": "VoiceMeeter Output (VB-Audio Vo (MME)", - "sg_output_device": "VoiceMeeter Aux Input (VB-Audio (MME)", - "threhold": -45.0, - "pitch": 12.0, - "index_rate": 0.0, - "rms_mix_rate": 0.0, - "block_time": 0.25, - "crossfade_length": 0.04, - "extra_time": 2.0, - "n_cpu": 6.0, - "f0method": "rmvpe" -} +{"pth_path": "assets/weights/kikiV1.pth", "index_path": "logs/kikiV1.index", "sg_input_device": "VoiceMeeter Output (VB-Audio Vo (MME)", "sg_output_device": "VoiceMeeter Input (VB-Audio Voi (MME)", "threhold": -45.0, "pitch": 2.0, "rms_mix_rate": 0.0, "index_rate": 0.0, "block_time": 0.52, "crossfade_length": 0.15, "extra_time": 2.46, "n_cpu": 6.0, "use_jit": false, "f0method": "rmvpe"} \ No newline at end of file diff --git a/configs/config.py b/configs/config.py index 20bbb36..af2350f 100644 --- a/configs/config.py +++ b/configs/config.py @@ -13,7 +13,7 @@ try: from infer.modules.ipex import ipex_init ipex_init() -except Exception: +except Exception: # pylint: disable=broad-exception-caught pass import logging @@ -44,6 +44,7 @@ class Config: def __init__(self): self.device = "cuda:0" self.is_half = True + self.use_jit = False self.n_cpu = 0 self.gpu_name = None self.json_config = self.load_config_json() @@ -122,6 +123,15 @@ class Config: def use_fp32_config(self): for config_file in version_config_list: self.json_config[config_file]["train"]["fp16_run"] = False + with open(f"configs/{config_file}", "r") as f: + strr = f.read().replace("true", "false") + with open(f"configs/{config_file}", "w") as f: + f.write(strr) + with open("infer/modules/train/preprocess.py", "r") as f: + strr = f.read().replace("3.7", "3.0") + with open("infer/modules/train/preprocess.py", "w") as f: + f.write(strr) + print("overwrite preprocess and configs.json") def device_config(self) -> tuple: if torch.cuda.is_available(): @@ -237,4 +247,5 @@ class Config: ) except: pass + print("is_half:%s, device:%s" % (self.is_half, self.device)) return x_pad, x_query, x_center, x_max diff --git a/docs/en/README.en.md b/docs/en/README.en.md index 1e52b81..f880869 100644 --- a/docs/en/README.en.md +++ b/docs/en/README.en.md @@ -97,7 +97,12 @@ sh ./run.sh ## Preparation of other Pre-models RVC requires other pre-models to infer and train. -You need to download them from our [Huggingface space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/). 
+```bash +#Download all needed models from https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/ +python tools/download_models.py +``` + +Or just download them by yourself from our [Huggingface space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/). Here's a list of Pre-models and other files that RVC needs: ```bash diff --git a/gui_v1.py b/gui_v1.py index fee9e20..3254892 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -1,5 +1,4 @@ import os -import logging import sys from dotenv import load_dotenv @@ -13,10 +12,16 @@ now_dir = os.getcwd() sys.path.append(now_dir) import multiprocessing -logger = logging.getLogger(__name__) stream_latency = -1 +def printt(strr, *args): + if len(args) == 0: + print(strr) + else: + print(strr % args) + + class Harvest(multiprocessing.Process): def __init__(self, inp_q, opt_q): multiprocessing.Process.__init__(self) @@ -62,9 +67,11 @@ if __name__ == "__main__": import tools.rvc_for_realtime as rvc_for_realtime from i18n.i18n import I18nAuto + from configs.config import Config i18n = I18nAuto() - device = rvc_for_realtime.config.device + + # device = rvc_for_realtime.config.device # device = torch.device( # "cuda" # if torch.cuda.is_available() @@ -86,8 +93,8 @@ if __name__ == "__main__": self.block_time: float = 1.0 # s self.buffer_num: int = 1 self.threhold: int = -60 - self.crossfade_time: float = 0.04 - self.extra_time: float = 2.0 + self.crossfade_time: float = 0.05 + self.extra_time: float = 2.5 self.I_noise_reduce = False self.O_noise_reduce = False self.rms_mix_rate = 0.0 @@ -99,7 +106,8 @@ if __name__ == "__main__": class GUI: def __init__(self) -> None: - self.config = GUIConfig() + self.gui_config = GUIConfig() + self.config = Config() self.flag_vc = False self.function = "vc" self.delay_time = 0 @@ -130,9 +138,10 @@ if __name__ == "__main__": "index_rate": "0", "rms_mix_rate": "0", "block_time": "0.25", - "crossfade_length": "0.04", - "extra_time": "2", + "crossfade_length": "0.05", + "extra_time": "2.5", "f0method": "rmvpe", + "use_jit": False, } data["pm"] = data["f0method"] == "pm" data["harvest"] = data["f0method"] == "harvest" @@ -142,6 +151,7 @@ if __name__ == "__main__": def launcher(self): data = self.load() + self.config.use_jit = False # data.get("use_jit", self.config.use_jit) sg.theme("LightBlue3") input_devices, output_devices, _, _ = self.get_devices() layout = [ @@ -294,6 +304,17 @@ if __name__ == "__main__": enable_events=True, ), ], + # [ + # sg.Text("设备延迟"), + # sg.Slider( + # range=(0, 1), + # key="device_latency", + # resolution=0.001, + # orientation="h", + # default_value=data.get("device_latency", "0.1"), + # enable_events=True, + # ), + # ], [ sg.Text(i18n("harvest进程数")), sg.Slider( @@ -302,7 +323,7 @@ if __name__ == "__main__": resolution=1, orientation="h", default_value=data.get( - "n_cpu", min(self.config.n_cpu, n_cpu) + "n_cpu", min(self.gui_config.n_cpu, n_cpu) ), enable_events=True, ), @@ -314,7 +335,7 @@ if __name__ == "__main__": key="crossfade_length", resolution=0.01, orientation="h", - default_value=data.get("crossfade_length", "0.04"), + default_value=data.get("crossfade_length", "0.05"), enable_events=True, ), ], @@ -325,7 +346,7 @@ if __name__ == "__main__": key="extra_time", resolution=0.01, orientation="h", - default_value=data.get("extra_time", "2.0"), + default_value=data.get("extra_time", "2.5"), enable_events=True, ), ], @@ -340,7 +361,14 @@ if __name__ == "__main__": key="O_noise_reduce", enable_events=True, ), + # sg.Checkbox( + # "JIT加速", + # default=self.config.use_jit, + # 
key="use_jit", + # enable_events=False, + # ), ], + # [sg.Text("注:首次使用JIT加速时,会出现卡顿,\n 并伴随一些噪音,但这是正常现象!")], ], title=i18n("性能设置"), ), @@ -382,24 +410,24 @@ if __name__ == "__main__": prev_output = self.window["sg_output_device"].get() input_devices, output_devices, _, _ = self.get_devices(update=True) if prev_input not in input_devices: - self.config.sg_input_device = input_devices[0] + self.gui_config.sg_input_device = input_devices[0] else: - self.config.sg_input_device = prev_input + self.gui_config.sg_input_device = prev_input self.window["sg_input_device"].Update(values=input_devices) self.window["sg_input_device"].Update( - value=self.config.sg_input_device + value=self.gui_config.sg_input_device ) if prev_output not in output_devices: - self.config.sg_output_device = output_devices[0] + self.gui_config.sg_output_device = output_devices[0] else: - self.config.sg_output_device = prev_output + self.gui_config.sg_output_device = prev_output self.window["sg_output_device"].Update(values=output_devices) self.window["sg_output_device"].Update( - value=self.config.sg_output_device + value=self.gui_config.sg_output_device ) if event == "start_vc" and self.flag_vc == False: if self.set_values(values) == True: - logger.info("cuda_is_available: %s", torch.cuda.is_available()) + printt("cuda_is_available: %s", torch.cuda.is_available()) self.start_vc() settings = { "pth_path": values["pth_path"], @@ -410,10 +438,13 @@ if __name__ == "__main__": "pitch": values["pitch"], "rms_mix_rate": values["rms_mix_rate"], "index_rate": values["index_rate"], + # "device_latency": values["device_latency"], "block_time": values["block_time"], "crossfade_length": values["crossfade_length"], "extra_time": values["extra_time"], "n_cpu": values["n_cpu"], + # "use_jit": values["use_jit"], + "use_jit": False, "f0method": ["pm", "harvest", "crepe", "rmvpe"][ [ values["pm"], @@ -442,28 +473,28 @@ if __name__ == "__main__": stream_latency = -1 # Parameter hot update if event == "threhold": - self.config.threhold = values["threhold"] + self.gui_config.threhold = values["threhold"] elif event == "pitch": - self.config.pitch = values["pitch"] + self.gui_config.pitch = values["pitch"] if hasattr(self, "rvc"): self.rvc.change_key(values["pitch"]) elif event == "index_rate": - self.config.index_rate = values["index_rate"] + self.gui_config.index_rate = values["index_rate"] if hasattr(self, "rvc"): self.rvc.change_index_rate(values["index_rate"]) elif event == "rms_mix_rate": - self.config.rms_mix_rate = values["rms_mix_rate"] + self.gui_config.rms_mix_rate = values["rms_mix_rate"] elif event in ["pm", "harvest", "crepe", "rmvpe"]: - self.config.f0method = event + self.gui_config.f0method = event elif event == "I_noise_reduce": - self.config.I_noise_reduce = values["I_noise_reduce"] + self.gui_config.I_noise_reduce = values["I_noise_reduce"] if stream_latency > 0: self.delay_time += ( 1 if values["I_noise_reduce"] else -1 ) * values["crossfade_length"] self.window["delay_time"].update(int(self.delay_time * 1000)) elif event == "O_noise_reduce": - self.config.O_noise_reduce = values["O_noise_reduce"] + self.gui_config.O_noise_reduce = values["O_noise_reduce"] elif event in ["vc", "im"]: self.function = event elif event != "start_vc" and self.flag_vc == True: @@ -486,19 +517,21 @@ if __name__ == "__main__": sg.popup(i18n("index文件路径不可包含中文")) return False self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - 
self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.rms_mix_rate = values["rms_mix_rate"] - self.config.index_rate = values["index_rate"] - self.config.n_cpu = values["n_cpu"] - self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][ + self.config.use_jit = False # values["use_jit"] + # self.device_latency = values["device_latency"] + self.gui_config.pth_path = values["pth_path"] + self.gui_config.index_path = values["index_path"] + self.gui_config.threhold = values["threhold"] + self.gui_config.pitch = values["pitch"] + self.gui_config.block_time = values["block_time"] + self.gui_config.crossfade_time = values["crossfade_length"] + self.gui_config.extra_time = values["extra_time"] + self.gui_config.I_noise_reduce = values["I_noise_reduce"] + self.gui_config.O_noise_reduce = values["O_noise_reduce"] + self.gui_config.rms_mix_rate = values["rms_mix_rate"] + self.gui_config.index_rate = values["index_rate"] + self.gui_config.n_cpu = values["n_cpu"] + self.gui_config.f0method = ["pm", "harvest", "crepe", "rmvpe"][ [ values["pm"], values["harvest"], @@ -512,34 +545,48 @@ if __name__ == "__main__": torch.cuda.empty_cache() self.flag_vc = True self.rvc = rvc_for_realtime.RVC( - self.config.pitch, - self.config.pth_path, - self.config.index_path, - self.config.index_rate, - self.config.n_cpu, + self.gui_config.pitch, + self.gui_config.pth_path, + self.gui_config.index_path, + self.gui_config.index_rate, + self.gui_config.n_cpu, inp_q, opt_q, - device, + self.config, self.rvc if hasattr(self, "rvc") else None, ) - self.config.samplerate = self.rvc.tgt_sr + self.gui_config.samplerate = self.rvc.tgt_sr self.zc = self.rvc.tgt_sr // 100 self.block_frame = ( - int(np.round(self.config.block_time * self.config.samplerate / self.zc)) + int( + np.round( + self.gui_config.block_time + * self.gui_config.samplerate + / self.zc + ) + ) * self.zc ) self.block_frame_16k = 160 * self.block_frame // self.zc self.crossfade_frame = ( int( np.round( - self.config.crossfade_time * self.config.samplerate / self.zc + self.gui_config.crossfade_time + * self.gui_config.samplerate + / self.zc ) ) * self.zc ) self.sola_search_frame = self.zc self.extra_frame = ( - int(np.round(self.config.extra_time * self.config.samplerate / self.zc)) + int( + np.round( + self.gui_config.extra_time + * self.gui_config.samplerate + / self.zc + ) + ) * self.zc ) self.input_wav: torch.Tensor = torch.zeros( @@ -547,12 +594,12 @@ if __name__ == "__main__": + self.crossfade_frame + self.sola_search_frame + self.block_frame, - device=device, + device=self.config.device, dtype=torch.float32, ) self.input_wav_res: torch.Tensor = torch.zeros( 160 * self.input_wav.shape[0] // self.zc, - device=device, + device=self.config.device, dtype=torch.float32, ) self.pitch: np.ndarray = np.zeros( @@ -564,12 +611,12 @@ if __name__ == "__main__": dtype="float64", ) self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 + self.crossfade_frame, device=self.config.device, dtype=torch.float32 ) self.nr_buffer: torch.Tensor = self.sola_buffer.clone() self.output_buffer: torch.Tensor = self.input_wav.clone() self.res_buffer: torch.Tensor = torch.zeros( - 2 * self.zc, device=device, dtype=torch.float32 + 2 * 
self.zc, device=self.config.device, dtype=torch.float32 ) self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0] self.fade_in_window: torch.Tensor = ( @@ -580,7 +627,7 @@ if __name__ == "__main__": 0.0, 1.0, steps=self.crossfade_frame, - device=device, + device=self.config.device, dtype=torch.float32, ) ) @@ -588,11 +635,13 @@ if __name__ == "__main__": ) self.fade_out_window: torch.Tensor = 1 - self.fade_in_window self.resampler = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ).to(device) + orig_freq=self.gui_config.samplerate, + new_freq=16000, + dtype=torch.float32, + ).to(self.config.device) self.tg = TorchGate( - sr=self.config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9 - ).to(device) + sr=self.gui_config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9 + ).to(self.config.device) thread_vc = threading.Thread(target=self.soundinput) thread_vc.start() @@ -605,15 +654,15 @@ if __name__ == "__main__": channels=channels, callback=self.audio_callback, blocksize=self.block_frame, - samplerate=self.config.samplerate, + samplerate=self.gui_config.samplerate, dtype="float32", ) as stream: global stream_latency stream_latency = stream.latency[-1] while self.flag_vc: - time.sleep(self.config.block_time) - logger.debug("Audio block passed.") - logger.debug("ENDing VC") + time.sleep(self.gui_config.block_time) + printt("Audio block passed.") + printt("ENDing VC") def audio_callback( self, indata: np.ndarray, outdata: np.ndarray, frames, times, status @@ -623,12 +672,12 @@ if __name__ == "__main__": """ start_time = time.perf_counter() indata = librosa.to_mono(indata.T) - if self.config.threhold > -60: + if self.gui_config.threhold > -60: rms = librosa.feature.rms( y=indata, frame_length=4 * self.zc, hop_length=self.zc ) db_threhold = ( - librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold + librosa.amplitude_to_db(rms, ref=1.0)[0] < self.gui_config.threhold ) for i in range(db_threhold.shape[0]): if db_threhold[i]: @@ -636,12 +685,14 @@ if __name__ == "__main__": self.input_wav[: -self.block_frame] = self.input_wav[ self.block_frame : ].clone() - self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to(device) + self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to( + self.config.device + ) self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[ self.block_frame_16k : ].clone() # input noise reduction and resampling - if self.config.I_noise_reduce and self.function == "vc": + if self.gui_config.I_noise_reduce and self.function == "vc": input_wav = self.input_wav[ -self.crossfade_frame - self.block_frame - 2 * self.zc : ] @@ -667,7 +718,7 @@ if __name__ == "__main__": # infer if self.function == "vc": f0_extractor_frame = self.block_frame_16k + 800 - if self.config.f0method == "rmvpe": + if self.gui_config.f0method == "rmvpe": f0_extractor_frame = ( 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160 ) @@ -678,7 +729,7 @@ if __name__ == "__main__": self.valid_rate, self.pitch, self.pitchf, - self.config.f0method, + self.gui_config.f0method, ) infer_wav = infer_wav[ -self.crossfade_frame - self.sola_search_frame - self.block_frame : @@ -688,8 +739,8 @@ if __name__ == "__main__": -self.crossfade_frame - self.sola_search_frame - self.block_frame : ].clone() # output noise reduction - if (self.config.O_noise_reduce and self.function == "vc") or ( - self.config.I_noise_reduce and self.function == "im" + if (self.gui_config.O_noise_reduce and self.function == "vc") or ( + 
self.gui_config.I_noise_reduce and self.function == "im" ): self.output_buffer[: -self.block_frame] = self.output_buffer[ self.block_frame : @@ -699,7 +750,7 @@ if __name__ == "__main__": infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0) ).squeeze(0) # volume envelop mixing - if self.config.rms_mix_rate < 1 and self.function == "vc": + if self.gui_config.rms_mix_rate < 1 and self.function == "vc": rms1 = librosa.feature.rms( y=self.input_wav_res[-160 * infer_wav.shape[0] // self.zc :] .cpu() @@ -707,7 +758,7 @@ if __name__ == "__main__": frame_length=640, hop_length=160, ) - rms1 = torch.from_numpy(rms1).to(device) + rms1 = torch.from_numpy(rms1).to(self.config.device) rms1 = F.interpolate( rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, @@ -719,7 +770,7 @@ if __name__ == "__main__": frame_length=4 * self.zc, hop_length=self.zc, ) - rms2 = torch.from_numpy(rms2).to(device) + rms2 = torch.from_numpy(rms2).to(self.config.device) rms2 = F.interpolate( rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, @@ -728,7 +779,7 @@ if __name__ == "__main__": )[0, 0, :-1] rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3) infer_wav *= torch.pow( - rms1 / rms2, torch.tensor(1 - self.config.rms_mix_rate) + rms1 / rms2, torch.tensor(1 - self.gui_config.rms_mix_rate) ) # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC conv_input = infer_wav[ @@ -738,7 +789,7 @@ if __name__ == "__main__": cor_den = torch.sqrt( F.conv1d( conv_input**2, - torch.ones(1, 1, self.crossfade_frame, device=device), + torch.ones(1, 1, self.crossfade_frame, device=self.config.device), ) + 1e-8 ) @@ -747,7 +798,7 @@ if __name__ == "__main__": sola_offset = sola_offset.item() else: sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - logger.debug("sola_offset = %d", int(sola_offset)) + printt("sola_offset = %d", int(sola_offset)) infer_wav = infer_wav[ sola_offset : sola_offset + self.block_frame + self.crossfade_frame ] @@ -764,7 +815,7 @@ if __name__ == "__main__": ) total_time = time.perf_counter() - start_time self.window["infer_time"].update(int(total_time * 1000)) - logger.info("Infer time: %.2f", total_time) + printt("Infer time: %.2f", total_time) def get_devices(self, update: bool = True): """获取设备列表""" @@ -817,9 +868,7 @@ if __name__ == "__main__": sd.default.device[1] = output_device_indices[ output_devices.index(output_device) ] - logger.info("Input device: %s:%s", str(sd.default.device[0]), input_device) - logger.info( - "Output device: %s:%s", str(sd.default.device[1]), output_device - ) + printt("Input device: %s:%s", str(sd.default.device[0]), input_device) + printt("Output device: %s:%s", str(sd.default.device[1]), output_device) gui = GUI() diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index 9fa744b..dba5ec3 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -38,6 +38,7 @@ "加载模型": "Load model", "加载预训练底模D路径": "Load pre-trained base model D path:", "加载预训练底模G路径": "Load pre-trained base model G path:", + "单次推理": "单次推理", "卸载音色省显存": "Unload voice to save GPU memory:", "变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):", "后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. 
Set to 0 for no resampling:", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.", "性能设置": "Performance settings", "总训练轮数total_epoch": "Total training epochs (total_epoch):", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').", "指定输出主人声文件夹": "Specify the output folder for vocals:", "指定输出文件夹": "Specify output folder:", @@ -86,7 +88,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.", "目标采样率": "Target sample rate:", - "算法延迟(ms):": "算法延迟(ms):", + "算法延迟(ms):": "Algorithmic delays(ms):", "自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown:", "融合": "Fusion", "要改的模型信息": "Model information to be modified:", @@ -96,8 +98,8 @@ "训练特征索引": "Train feature index", "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.", "请指定说话人id": "Please specify the speaker/singer ID:", - "请选择index文件": "请选择index文件", - "请选择pth文件": "请选择pth文件", + "请选择index文件": "Please choose the .index file", + "请选择pth文件": "Please choose the .pth file", "请选择说话人id": "Select Speaker/Singer ID:", "转换": "Convert", "输入实验名": "Enter the experiment name:", @@ -105,12 +107,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):", "输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. 
Closer to 1 will be more of a consistently loud volume:", - "输入监听": "输入监听", + "输入监听": "Input voice monitor", "输入训练文件夹路径": "Enter the path of the training folder:", "输入设备": "Input device", "输入降噪": "Input noise reduction", "输出信息": "Output information", - "输出变声": "输出变声", + "输出变声": "Output converted voice", "输出设备": "Output device", "输出降噪": "Output noise reduction", "输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)", diff --git a/i18n/locale/es_ES.json b/i18n/locale/es_ES.json index 961cb3a..fdd17f0 100644 --- a/i18n/locale/es_ES.json +++ b/i18n/locale/es_ES.json @@ -38,6 +38,7 @@ "加载模型": "Cargar modelo", "加载预训练底模D路径": "Cargue la ruta del modelo D base pre-entrenada.", "加载预训练底模G路径": "Cargue la ruta del modelo G base pre-entrenada.", + "单次推理": "单次推理", "卸载音色省显存": "Descargue la voz para ahorrar memoria GPU", "变调(整数, 半音数量, 升八度12降八度-12)": "Cambio de tono (entero, número de semitonos, subir una octava +12 o bajar una octava -12)", "后处理重采样至最终采样率,0为不进行重采样": "Remuestreo posterior al proceso a la tasa de muestreo final, 0 significa no remuestrear", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica adecuada para soportar su entrenamiento", "性能设置": "Configuración de rendimiento", "总训练轮数total_epoch": "Total de épocas de entrenamiento (total_epoch)", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta que contiene los archivos de audio para convertir o cargue varios archivos de audio. El audio convertido se emitirá en la carpeta especificada (opción predeterminada).", "指定输出主人声文件夹": "Especifique la carpeta de salida para la voz principal", "指定输出文件夹": "Especificar carpeta de salida", diff --git a/i18n/locale/fr_FR.json b/i18n/locale/fr_FR.json index d12078e..64bb37a 100644 --- a/i18n/locale/fr_FR.json +++ b/i18n/locale/fr_FR.json @@ -38,6 +38,7 @@ "加载模型": "Charger le modèle.", "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D :", "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G :", + "单次推理": "单次推理", "卸载音色省显存": "Décharger la voix pour économiser la mémoire GPU.", "变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :", "后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. Réglez sur 0 pour ne pas effectuer de rééchantillonnage :", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, il n'y a pas de GPU compatible disponible pour prendre en charge votre entrainement.", "性能设置": "Paramètres de performance", "总训练轮数total_epoch": "Nombre total d'époques d'entraînement (total_epoch) :", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot. Entrez le dossier contenant les fichiers audio à convertir ou téléchargez plusieurs fichiers audio. 
Les fichiers audio convertis seront enregistrés dans le dossier spécifié (par défaut : 'opt').", "指定输出主人声文件夹": "Spécifiez le dossier de sortie pour les fichiers de voix :", "指定输出文件夹": "Spécifiez le dossier de sortie :", diff --git a/i18n/locale/it_IT.json b/i18n/locale/it_IT.json index 38fdef8..02eac59 100644 --- a/i18n/locale/it_IT.json +++ b/i18n/locale/it_IT.json @@ -38,6 +38,7 @@ "加载模型": "Carica modello", "加载预训练底模D路径": "Carica il percorso D del modello base pre-addestrato:", "加载预训练底模G路径": "Carica il percorso G del modello base pre-addestrato:", + "单次推理": "单次推理", "卸载音色省显存": "Scarica la voce per risparmiare memoria della GPU:", "变调(整数, 半音数量, 升八度12降八度-12)": "Trasposizione (numero intero, numero di semitoni, alza di un'ottava: 12, abbassa di un'ottava: -12):", "后处理重采样至最终采样率,0为不进行重采样": "Ricampiona l'audio di output in post-elaborazione alla frequenza di campionamento finale. ", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Sfortunatamente, non è disponibile alcuna GPU compatibile per supportare l'addestramento.", "性能设置": "Impostazioni delle prestazioni", "总训练轮数total_epoch": "Epoch totali di addestramento (total_epoch):", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione massiva. Inserisci il percorso della cartella che contiene i file da convertire o carica più file audio. I file convertiti finiranno nella cartella specificata. (default: opt) ", "指定输出主人声文件夹": "Specifica la cartella di output per le voci:", "指定输出文件夹": "Specifica la cartella di output:", diff --git a/i18n/locale/ja_JP.json b/i18n/locale/ja_JP.json index 903ed87..d02f331 100644 --- a/i18n/locale/ja_JP.json +++ b/i18n/locale/ja_JP.json @@ -38,6 +38,7 @@ "加载模型": "モデルをロード", "加载预训练底模D路径": "事前学習済みのDモデルのパス", "加载预训练底模G路径": "事前学習済みのGモデルのパス", + "单次推理": "单次推理", "卸载音色省显存": "音源を削除してメモリを節約", "变调(整数, 半音数量, 升八度12降八度-12)": "ピッチ変更(整数、半音数、上下オクターブ12-12)", "后处理重采样至最终采样率,0为不进行重采样": "最終的なサンプリングレートへのポストプロセッシングのリサンプリング リサンプリングしない場合は0", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "トレーニングに対応したGPUが動作しないのは残念です。", "性能设置": "パフォーマンス設定", "总训练轮数total_epoch": "総エポック数", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "一括変換、変換する音声フォルダを入力、または複数の音声ファイルをアップロードし、指定したフォルダ(デフォルトのopt)に変換した音声を出力します。", "指定输出主人声文件夹": "マスターの出力音声フォルダーを指定する", "指定输出文件夹": "出力フォルダを指定してください", diff --git a/i18n/locale/ru_RU.json b/i18n/locale/ru_RU.json index b6530ac..9d7ef8e 100644 --- a/i18n/locale/ru_RU.json +++ b/i18n/locale/ru_RU.json @@ -38,6 +38,7 @@ "加载模型": "Загрузить модель", "加载预训练底模D路径": "Путь к предварительно обученной базовой модели D:", "加载预训练底模G路径": "Путь к предварительно обученной базовой модели G:", + "单次推理": "单次推理", "卸载音色省显存": "Выгрузить модель из памяти GPU для освобождения ресурсов", "变调(整数, 半音数量, 升八度12降八度-12)": "Изменить высоту голоса (укажите количество полутонов; чтобы поднять голос на октаву, выберите 12, понизить на октаву — -12):", "后处理重采样至最终采样率,0为不进行重采样": "Изменить частоту дискретизации в выходном файле на финальную. Поставьте 0, чтобы ничего не изменялось:", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "К сожалению, у вас нету графического процессора, который поддерживает обучение моделей.", "性能设置": "Настройки быстроты", "总训练轮数total_epoch": "Полное количество эпох (total_epoch):", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Массовое преобразование. Введите путь к папке, в которой находятся файлы для преобразования голоса или выгрузите несколько аудиофайлов. 
Сконвертированные файлы будут сохранены в указанной папке (по умолчанию: 'opt').", "指定输出主人声文件夹": "Путь к папке для сохранения вокала:", "指定输出文件夹": "Папка для результатов:", diff --git a/i18n/locale/tr_TR.json b/i18n/locale/tr_TR.json index efd921b..04c6102 100644 --- a/i18n/locale/tr_TR.json +++ b/i18n/locale/tr_TR.json @@ -38,6 +38,7 @@ "加载模型": "Model yükle", "加载预训练底模D路径": "Önceden eğitilmiş temel D modelini yükleme yolu:", "加载预训练底模G路径": "Önceden eğitilmiş temel G modelini yükleme yolu:", + "单次推理": "单次推理", "卸载音色省显存": "GPU bellek kullanımını azaltmak için sesi kaldır", "变调(整数, 半音数量, 升八度12降八度-12)": "Transpoze et (tamsayı, yarıton sayısıyla; bir oktav yükseltmek için: 12, bir oktav düşürmek için: -12):", "后处理重采样至最终采样率,0为不进行重采样": "Son işleme aşamasında çıktı sesini son örnekleme hızına yeniden örnekle. 0 değeri için yeniden örnekleme yapılmaz:", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Maalesef, eğitiminizi desteklemek için uyumlu bir GPU bulunmamaktadır.", "性能设置": "Performans ayarları", "总训练轮数total_epoch": "Toplam eğitim turu (total_epoch):", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Toplu dönüştür. Dönüştürülecek ses dosyalarının bulunduğu klasörü girin veya birden çok ses dosyasını yükleyin. Dönüştürülen ses dosyaları belirtilen klasöre ('opt' varsayılan olarak) dönüştürülecektir", "指定输出主人声文件夹": "Vokal için çıkış klasörünü belirtin:", "指定输出文件夹": "Çıkış klasörünü belirt:", diff --git a/i18n/locale/zh_CN.json b/i18n/locale/zh_CN.json index b14e5f0..2c77001 100644 --- a/i18n/locale/zh_CN.json +++ b/i18n/locale/zh_CN.json @@ -38,6 +38,7 @@ "加载模型": "加载模型", "加载预训练底模D路径": "加载预训练底模D路径", "加载预训练底模G路径": "加载预训练底模G路径", + "单次推理": "单次推理", "卸载音色省显存": "卸载音色省显存", "变调(整数, 半音数量, 升八度12降八度-12)": "变调(整数, 半音数量, 升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "后处理重采样至最终采样率,0为不进行重采样", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "性能设置", "总训练轮数total_epoch": "总训练轮数total_epoch", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定输出文件夹", diff --git a/i18n/locale/zh_HK.json b/i18n/locale/zh_HK.json index fa2fbad..b7f6171 100644 --- a/i18n/locale/zh_HK.json +++ b/i18n/locale/zh_HK.json @@ -38,6 +38,7 @@ "加载模型": "載入模型", "加载预训练底模D路径": "加載預訓練底模D路徑", "加载预训练底模G路径": "加載預訓練底模G路徑", + "单次推理": "单次推理", "卸载音色省显存": "卸載音色節省 VRAM", "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "效能設定", "总训练轮数total_epoch": "總訓練輪數total_epoch", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定輸出資料夾", diff --git a/i18n/locale/zh_SG.json b/i18n/locale/zh_SG.json index fa2fbad..b7f6171 100644 --- a/i18n/locale/zh_SG.json +++ b/i18n/locale/zh_SG.json @@ -38,6 +38,7 @@ "加载模型": "載入模型", "加载预训练底模D路径": "加載預訓練底模D路徑", "加载预训练底模G路径": "加載預訓練底模G路徑", + "单次推理": "单次推理", "卸载音色省显存": "卸載音色節省 VRAM", "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "效能設定", "总训练轮数total_epoch": "總訓練輪數total_epoch", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. 
": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定輸出資料夾", diff --git a/i18n/locale/zh_TW.json b/i18n/locale/zh_TW.json index fa2fbad..b7f6171 100644 --- a/i18n/locale/zh_TW.json +++ b/i18n/locale/zh_TW.json @@ -38,6 +38,7 @@ "加载模型": "載入模型", "加载预训练底模D路径": "加載預訓練底模D路徑", "加载预训练底模G路径": "加載預訓練底模G路徑", + "单次推理": "单次推理", "卸载音色省显存": "卸載音色節省 VRAM", "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "效能設定", "总训练轮数total_epoch": "總訓練輪數total_epoch", + "批量推理": "批量推理", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定輸出資料夾", diff --git a/infer-web.py b/infer-web.py index 542b42b..57c8845 100644 --- a/infer-web.py +++ b/infer-web.py @@ -1,36 +1,46 @@ -import os, sys +import os +import sys now_dir = os.getcwd() sys.path.append(now_dir) -import logging -import shutil -import threading -import traceback -import warnings -from random import shuffle -from subprocess import Popen -from time import sleep -import json -import pathlib - -import fairseq -import faiss -import gradio as gr -import numpy as np -import torch -from dotenv import load_dotenv -from sklearn.cluster import MiniBatchKMeans - -from configs.config import Config -from i18n.i18n import I18nAuto +from infer.modules.vc.modules import VC +from infer.modules.uvr5.modules import uvr from infer.lib.train.process_ckpt import ( change_info, extract_small_model, merge, show_info, ) -from infer.modules.uvr5.modules import uvr -from infer.modules.vc.modules import VC +from i18n.i18n import I18nAuto +from configs.config import Config +from sklearn.cluster import MiniBatchKMeans +from dotenv import load_dotenv +import torch + +try: + import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import + + if torch.xpu.is_available(): + from infer.modules.ipex import ipex_init + + ipex_init() +except Exception: # pylint: disable=broad-exception-caught + pass +import numpy as np +import gradio as gr +import faiss +import fairseq +import pathlib +import json +from time import sleep +from subprocess import Popen +from random import shuffle +import warnings +import traceback +import threading +import shutil +import logging + logging.getLogger("numba").setLevel(logging.WARNING) @@ -165,10 +175,10 @@ def clean(): return {"value": "", "__type__": "update"} -def export_onnx(): +def export_onnx(ModelPath, ExportedPath): from infer.modules.onnx.export import export_onnx as eo - eo() + eo(ModelPath, ExportedPath) sr_dict = { @@ -219,8 +229,9 @@ def preprocess_dataset(trainset_dir, exp_dir, sr, n_p): per, ) logger.info(cmd) - p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir + p = Popen(cmd, shell=True) + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done, @@ -263,7 +274,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp p = Popen( cmd, shell=True, cwd=now_dir ) # , stdin=PIPE, stdout=PIPE,stderr=PIPE - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done, @@ -295,7 +306,7 @@ def extract_f0_feature(gpus, n_p, f0method, 
if_f0, exp_dir, version19, gpus_rmvp cmd, shell=True, cwd=now_dir ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done_multi, # @@ -331,7 +342,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp log = f.read() logger.info(log) yield log - ####对不同part分别开多进程 + # 对不同part分别开多进程 """ n_part=int(sys.argv[1]) i_part=int(sys.argv[2]) @@ -360,7 +371,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp cmd, shell=True, cwd=now_dir ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done_multi, @@ -701,11 +712,11 @@ def train1key( infos.append(strr) return "\n".join(infos) - ####### step1:处理数据 + # step1:处理数据 yield get_info_str(i18n("step1:正在处理数据")) [get_info_str(_) for _ in preprocess_dataset(trainset_dir4, exp_dir1, sr2, np7)] - ####### step2a:提取音高 + # step2a:提取音高 yield get_info_str(i18n("step2:正在提取音高&正在提取特征")) [ get_info_str(_) @@ -714,7 +725,7 @@ def train1key( ) ] - ####### step3a:训练模型 + # step3a:训练模型 yield get_info_str(i18n("step3a:正在训练模型")) click_train( exp_dir1, @@ -734,7 +745,7 @@ def train1key( ) yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log")) - ####### step3b:训练索引 + # step3b:训练索引 [get_info_str(_) for _ in train_index(exp_dir1, version19)] yield get_info_str(i18n("全流程结束!")) @@ -768,6 +779,7 @@ def change_f0_method(f0method8): with gr.Blocks(title="RVC WebUI") as app: + gr.Markdown("## RVC WebUI") gr.Markdown( value=i18n( "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." @@ -777,8 +789,9 @@ with gr.Blocks(title="RVC WebUI") as app: with gr.TabItem(i18n("模型推理")): with gr.Row(): sid0 = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names)) - refresh_button = gr.Button(i18n("刷新音色列表和索引路径"), variant="primary") - clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary") + with gr.Column(): + refresh_button = gr.Button(i18n("刷新音色列表和索引路径"), variant="primary") + clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary") spk_item = gr.Slider( minimum=0, maximum=2333, @@ -791,118 +804,125 @@ with gr.Blocks(title="RVC WebUI") as app: clean_button.click( fn=clean, inputs=[], outputs=[sid0], api_name="infer_clean" ) - with gr.Group(): - gr.Markdown( - value=i18n("男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ") - ) - with gr.Row(): - with gr.Column(): - vc_transform0 = gr.Number( - label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 - ) - input_audio0 = gr.Textbox( - label=i18n("输入待处理音频文件路径(默认是正确格式示例)"), - value="E:\\codes\\py39\\test-20230416b\\todo-songs\\冬之花clip1.wav", - ) - f0method0 = gr.Radio( - label=i18n( - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU" - ), - choices=["pm", "harvest", "crepe", "rmvpe"] - if config.dml == False - else ["pm", "harvest", "rmvpe"], - value="pm", - interactive=True, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - with gr.Column(): - file_index1 = gr.Textbox( - label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), - value="", - interactive=True, - ) - file_index2 = gr.Dropdown( - label=i18n("自动检测index路径,下拉式选择(dropdown)"), - choices=sorted(index_paths), - interactive=True, - ) - refresh_button.click( - fn=change_choices, - inputs=[], - outputs=[sid0, file_index2], - api_name="infer_refresh", - ) - # file_big_npy1 = gr.Textbox( - # label=i18n("特征文件路径"), - # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", - # interactive=True, - # ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=0.75, - interactive=True, - ) - with gr.Column(): - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("后处理重采样至最终采样率,0为不进行重采样"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), - value=0.25, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果" - ), - value=0.33, - step=0.01, - interactive=True, - ) - f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调")) - but0 = gr.Button(i18n("转换"), variant="primary") + with gr.TabItem(i18n("单次推理")): + with gr.Group(): with gr.Row(): - vc_output1 = gr.Textbox(label=i18n("输出信息")) - vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)")) - but0.click( - vc.vc_single, - [ - spk_item, - input_audio0, - vc_transform0, - f0_file, - f0method0, - file_index1, - file_index2, - # file_big_npy1, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - ], - [vc_output1, vc_output2], - api_name="infer_convert", - ) - with gr.Group(): + with gr.Column(): + vc_transform0 = gr.Number( + label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 + ) + input_audio0 = gr.Textbox( + label=i18n("输入待处理音频文件路径(默认是正确格式示例)"), + placeholder="C:\\Users\\Desktop\\audio_example.wav", + ) + file_index1 = gr.Textbox( + label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), + 
placeholder="C:\\Users\\Desktop\\model_example.index", + interactive=True, + ) + file_index2 = gr.Dropdown( + label=i18n("自动检测index路径,下拉式选择(dropdown)"), + choices=sorted(index_paths), + interactive=True, + ) + f0method0 = gr.Radio( + label=i18n( + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU" + ), + choices=["pm", "harvest", "crepe", "rmvpe"] + if config.dml == False + else ["pm", "harvest", "rmvpe"], + value="rmvpe", + interactive=True, + ) + + with gr.Column(): + resample_sr0 = gr.Slider( + minimum=0, + maximum=48000, + label=i18n("后处理重采样至最终采样率,0为不进行重采样"), + value=0, + step=1, + interactive=True, + ) + rms_mix_rate0 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), + value=0.25, + interactive=True, + ) + protect0 = gr.Slider( + minimum=0, + maximum=0.5, + label=i18n( + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果" + ), + value=0.33, + step=0.01, + interactive=True, + ) + filter_radius0 = gr.Slider( + minimum=0, + maximum=7, + label=i18n( + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音" + ), + value=3, + step=1, + interactive=True, + ) + index_rate1 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("检索特征占比"), + value=0.75, + interactive=True, + ) + f0_file = gr.File( + label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), + visible=False, + ) + + refresh_button.click( + fn=change_choices, + inputs=[], + outputs=[sid0, file_index2], + api_name="infer_refresh", + ) + # file_big_npy1 = gr.Textbox( + # label=i18n("特征文件路径"), + # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", + # interactive=True, + # ) + with gr.Group(): + with gr.Column(): + but0 = gr.Button(i18n("转换"), variant="primary") + with gr.Row(): + vc_output1 = gr.Textbox(label=i18n("输出信息")) + vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)")) + + but0.click( + vc.vc_single, + [ + spk_item, + input_audio0, + vc_transform0, + f0_file, + f0method0, + file_index1, + file_index2, + # file_big_npy1, + index_rate1, + filter_radius0, + resample_sr0, + rms_mix_rate0, + protect0, + ], + [vc_output1, vc_output2], + api_name="infer_convert", + ) + with gr.TabItem(i18n("批量推理")): gr.Markdown( value=i18n("批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. 
") ) @@ -912,25 +932,6 @@ with gr.Blocks(title="RVC WebUI") as app: label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 ) opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt") - f0method1 = gr.Radio( - label=i18n( - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU" - ), - choices=["pm", "harvest", "crepe", "rmvpe"] - if config.dml == False - else ["pm", "harvest", "rmvpe"], - value="pm", - interactive=True, - ) - filter_radius1 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - with gr.Column(): file_index3 = gr.Textbox( label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), value="", @@ -941,6 +942,23 @@ with gr.Blocks(title="RVC WebUI") as app: choices=sorted(index_paths), interactive=True, ) + f0method1 = gr.Radio( + label=i18n( + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU" + ), + choices=["pm", "harvest", "crepe", "rmvpe"] + if config.dml == False + else ["pm", "harvest", "rmvpe"], + value="rmvpe", + interactive=True, + ) + format1 = gr.Radio( + label=i18n("导出文件格式"), + choices=["wav", "flac", "mp3", "m4a"], + value="wav", + interactive=True, + ) + refresh_button.click( fn=lambda: change_choices()[1], inputs=[], @@ -952,13 +970,7 @@ with gr.Blocks(title="RVC WebUI") as app: # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", # interactive=True, # ) - index_rate2 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=1, - interactive=True, - ) + with gr.Column(): resample_sr1 = gr.Slider( minimum=0, @@ -985,23 +997,34 @@ with gr.Blocks(title="RVC WebUI") as app: step=0.01, interactive=True, ) - with gr.Column(): - dir_input = gr.Textbox( - label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"), - value="E:\codes\py39\\test-20230416b\\todo-songs", - ) - inputs = gr.File( - file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") - ) - with gr.Row(): - format1 = gr.Radio( - label=i18n("导出文件格式"), - choices=["wav", "flac", "mp3", "m4a"], - value="flac", + filter_radius1 = gr.Slider( + minimum=0, + maximum=7, + label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), + value=3, + step=1, interactive=True, ) - but1 = gr.Button(i18n("转换"), variant="primary") - vc_output3 = gr.Textbox(label=i18n("输出信息")) + index_rate2 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("检索特征占比"), + value=1, + interactive=True, + ) + with gr.Row(): + dir_input = gr.Textbox( + label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"), + placeholder="C:\\Users\\Desktop\\input_vocal_dir", + ) + inputs = gr.File( + file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") + ) + + with gr.Row(): + but1 = gr.Button(i18n("转换"), variant="primary") + vc_output3 = gr.Textbox(label=i18n("输出信息")) + but1.click( vc.vc_multi, [ @@ -1024,12 +1047,12 @@ with gr.Blocks(title="RVC WebUI") as app: [vc_output3], api_name="infer_convert_batch", ) - sid0.change( - fn=vc.get_vc, - inputs=[sid0, protect0, protect1], - outputs=[spk_item, protect0, protect1, file_index2, file_index4], - api_name="infer_change_voice", - ) + sid0.change( + fn=vc.get_vc, + inputs=[sid0, protect0, protect1], + outputs=[spk_item, protect0, protect1, file_index2, file_index4], + api_name="infer_change_voice", + ) with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): with gr.Group(): gr.Markdown( @@ -1041,7 +1064,7 @@ with gr.Blocks(title="RVC WebUI") as app: with gr.Column(): dir_wav_input = gr.Textbox( label=i18n("输入待处理音频文件夹路径"), - value="E:\\codes\\py39\\test-20230416b\\todo-songs\\todo-songs", + 
placeholder="C:\\Users\\Desktop\\todo-songs", ) wav_inputs = gr.File( file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") diff --git a/infer/lib/infer_pack/attentions.py b/infer/lib/infer_pack/attentions.py index 2b6060c..2cc745a 100644 --- a/infer/lib/infer_pack/attentions.py +++ b/infer/lib/infer_pack/attentions.py @@ -1,5 +1,6 @@ import copy import math +from typing import Optional import numpy as np import torch @@ -22,11 +23,11 @@ class Encoder(nn.Module): window_size=10, **kwargs ): - super().__init__() + super(Encoder, self).__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads - self.n_layers = n_layers + self.n_layers = int(n_layers) self.kernel_size = kernel_size self.p_dropout = p_dropout self.window_size = window_size @@ -61,14 +62,17 @@ class Encoder(nn.Module): def forward(self, x, x_mask): attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) + zippep = zip( + self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2 + ) + for attn_layers, norm_layers_1, ffn_layers, norm_layers_2 in zippep: + y = attn_layers(x, x, attn_mask) y = self.drop(y) - x = self.norm_layers_1[i](x + y) + x = norm_layers_1(x + y) - y = self.ffn_layers[i](x, x_mask) + y = ffn_layers(x, x_mask) y = self.drop(y) - x = self.norm_layers_2[i](x + y) + x = norm_layers_2(x + y) x = x * x_mask return x @@ -86,7 +90,7 @@ class Decoder(nn.Module): proximal_init=True, **kwargs ): - super().__init__() + super(Decoder, self).__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads @@ -172,7 +176,7 @@ class MultiHeadAttention(nn.Module): proximal_bias=False, proximal_init=False, ): - super().__init__() + super(MultiHeadAttention, self).__init__() assert channels % n_heads == 0 self.channels = channels @@ -213,19 +217,28 @@ class MultiHeadAttention(nn.Module): self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) - def forward(self, x, c, attn_mask=None): + def forward( + self, x: torch.Tensor, c: torch.Tensor, attn_mask: Optional[torch.Tensor] = None + ): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) - x, self.attn = self.attention(q, k, v, mask=attn_mask) + x, _ = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x - def attention(self, query, key, value, mask=None): + def attention( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ): # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) + b, d, t_s = key.size() + t_t = query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) @@ -292,16 +305,17 @@ class MultiHeadAttention(nn.Module): ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret - def _get_relative_embeddings(self, relative_embeddings, length): + def _get_relative_embeddings(self, relative_embeddings, length: int): max_relative_position = 2 * self.window_size + 1 # Pad first before slice to avoid using cond ops. 
- pad_length = max(length - (self.window_size + 1), 0) + pad_length: int = max(length - (self.window_size + 1), 0) slice_start_position = max((self.window_size + 1) - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad( relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + # commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + [0, 0, pad_length, pad_length, 0, 0], ) else: padded_relative_embeddings = relative_embeddings @@ -317,12 +331,18 @@ class MultiHeadAttention(nn.Module): """ batch, heads, length, _ = x.size() # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + x = F.pad( + x, + # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]) + [0, 1, 0, 0, 0, 0, 0, 0], + ) # Concat extra elements so to add up to shape (len+1, 2*len-1). x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) + x_flat, + # commons.convert_pad_shape([[0, 0], [0, 0], [0, int(length) - 1]]) + [0, int(length) - 1, 0, 0, 0, 0], ) # Reshape and slice out the padded elements. @@ -339,15 +359,21 @@ class MultiHeadAttention(nn.Module): batch, heads, length, _ = x.size() # padd along column x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) + x, + # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, int(length) - 1]]) + [0, int(length) - 1, 0, 0, 0, 0, 0, 0], ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) + x_flat = x.view([batch, heads, int(length**2) + int(length * (length - 1))]) # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_flat = F.pad( + x_flat, + # commons.convert_pad_shape([[0, 0], [0, 0], [int(length), 0]]) + [length, 0, 0, 0, 0, 0], + ) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final - def _attention_bias_proximal(self, length): + def _attention_bias_proximal(self, length: int): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. 
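The int annotations added on length and pad_length are not cosmetic: torch.jit.script treats every unannotated parameter as a Tensor, so the scalar arithmetic in these helpers only compiles as intended once the scalars are declared as such. A minimal stand-alone example of that rule (not part of the patch):

    import torch

    @torch.jit.script
    def make_mask(length: int, max_length: int) -> torch.Tensor:
        # Without the int annotations TorchScript would assume both arguments
        # are Tensors and resolve different overloads for arange/compare.
        x = torch.arange(max_length)
        return (x < length).unsqueeze(0)

    print(make_mask(3, 5))  # tensor([[ True,  True,  True, False, False]])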
@@ -367,10 +393,10 @@ class FFN(nn.Module): filter_channels, kernel_size, p_dropout=0.0, - activation=None, + activation: str = None, causal=False, ): - super().__init__() + super(FFN, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.filter_channels = filter_channels @@ -378,40 +404,56 @@ class FFN(nn.Module): self.p_dropout = p_dropout self.activation = activation self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding + self.is_activation = True if activation == "gelu" else False + # if causal: + # self.padding = self._causal_padding + # else: + # self.padding = self._same_padding self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) self.drop = nn.Dropout(p_dropout) - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": + def padding(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor: + if self.causal: + padding = self._causal_padding(x * x_mask) + else: + padding = self._same_padding(x * x_mask) + return padding + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor): + x = self.conv_1(self.padding(x, x_mask)) + if self.is_activation: x = x * torch.sigmoid(1.702 * x) else: x = torch.relu(x) x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) + + x = self.conv_2(self.padding(x, x_mask)) return x * x_mask def _causal_padding(self, x): if self.kernel_size == 1: return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) + pad_l: int = self.kernel_size - 1 + pad_r: int = 0 + # padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad( + x, + # commons.convert_pad_shape(padding) + [pad_l, pad_r, 0, 0, 0, 0], + ) return x def _same_padding(self, x): if self.kernel_size == 1: return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) + pad_l: int = (self.kernel_size - 1) // 2 + pad_r: int = self.kernel_size // 2 + # padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad( + x, + # commons.convert_pad_shape(padding) + [pad_l, pad_r, 0, 0, 0, 0], + ) return x diff --git a/infer/lib/infer_pack/commons.py b/infer/lib/infer_pack/commons.py index 7ba7d21..4ec6c24 100644 --- a/infer/lib/infer_pack/commons.py +++ b/infer/lib/infer_pack/commons.py @@ -1,3 +1,4 @@ +from typing import List, Optional import math import numpy as np @@ -16,10 +17,10 @@ def get_padding(kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape +# def convert_pad_shape(pad_shape): +# l = pad_shape[::-1] +# pad_shape = [item for sublist in l for item in sublist] +# return pad_shape def kl_divergence(m_p, logs_p, m_q, logs_q): @@ -113,10 +114,14 @@ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): return acts -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape +# def convert_pad_shape(pad_shape): +# l = pad_shape[::-1] +# pad_shape = [item for sublist in l for item in sublist] +# return pad_shape + + +def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]: + return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist() 
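For reference, the re-introduced convert_pad_shape produces exactly the flat list the removed pure-Python helper did, but with a fully annotated List[List[int]] -> List[int] signature that torch.jit.script can compile. A quick equivalence check on an arbitrary nested spec:

    from typing import List

    import torch

    def convert_pad_shape_old(pad_shape: List[List[int]]) -> List[int]:
        # removed implementation: reverse the per-dimension pairs, then flatten
        return [item for sublist in pad_shape[::-1] for item in sublist]

    def convert_pad_shape_new(pad_shape: List[List[int]]) -> List[int]:
        # replacement from this patch: same result via tensor ops
        return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist()

    spec = [[0, 0], [2, 3], [1, 0]]
    assert convert_pad_shape_old(spec) == convert_pad_shape_new(spec) == [1, 0, 2, 3, 0, 0]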
def shift_1d(x): @@ -124,7 +129,7 @@ def shift_1d(x): return x -def sequence_mask(length, max_length=None): +def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None): if max_length is None: max_length = length.max() x = torch.arange(max_length, dtype=length.dtype, device=length.device) diff --git a/infer/lib/infer_pack/models.py b/infer/lib/infer_pack/models.py index 711db22..a60ced6 100644 --- a/infer/lib/infer_pack/models.py +++ b/infer/lib/infer_pack/models.py @@ -1,5 +1,6 @@ import math import logging +from typing import Optional logger = logging.getLogger(__name__) @@ -28,25 +29,32 @@ class TextEncoder256(nn.Module): p_dropout, f0=True, ): - super().__init__() + super(TextEncoder256, self).__init__() self.out_channels = out_channels self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.emb_phone = nn.Linear(256, hidden_channels) self.lrelu = nn.LeakyReLU(0.1, inplace=True) if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + float(p_dropout), ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, phone, pitch, lengths): - if pitch == None: + def forward( + self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor + ): + if pitch is None: x = self.emb_phone(phone) else: x = self.emb_phone(phone) + self.emb_pitch(pitch) @@ -75,25 +83,30 @@ class TextEncoder768(nn.Module): p_dropout, f0=True, ): - super().__init__() + super(TextEncoder768, self).__init__() self.out_channels = out_channels self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.emb_phone = nn.Linear(768, hidden_channels) self.lrelu = nn.LeakyReLU(0.1, inplace=True) if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + float(p_dropout), ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, phone, pitch, lengths): - if pitch == None: + def forward(self, phone: torch.Tensor, pitch: torch.Tensor, lengths: torch.Tensor): + if pitch is None: x = self.emb_phone(phone) else: x = self.emb_phone(phone) + self.emb_pitch(pitch) @@ -121,7 +134,7 @@ class ResidualCouplingBlock(nn.Module): n_flows=4, gin_channels=0, ): - super().__init__() + super(ResidualCouplingBlock, self).__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size @@ -145,19 +158,36 @@ class ResidualCouplingBlock(nn.Module): ) self.flows.append(modules.Flip()) - def forward(self, x, x_mask, g=None, reverse=False): + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ): if not reverse: for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) + for flow in self.flows[::-1]: + x, _ = flow.forward(x, 
x_mask, g=g, reverse=reverse) return x def remove_weight_norm(self): for i in range(self.n_flows): self.flows[i * 2].remove_weight_norm() + def __prepare_scriptable__(self): + for i in range(self.n_flows): + for hook in self.flows[i * 2]._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.flows[i * 2]) + + return self + class PosteriorEncoder(nn.Module): def __init__( @@ -170,7 +200,7 @@ class PosteriorEncoder(nn.Module): n_layers, gin_channels=0, ): - super().__init__() + super(PosteriorEncoder, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.hidden_channels = hidden_channels @@ -189,7 +219,9 @@ class PosteriorEncoder(nn.Module): ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, x, x_lengths, g=None): + def forward( + self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None + ): x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( x.dtype ) @@ -203,6 +235,15 @@ class PosteriorEncoder(nn.Module): def remove_weight_norm(self): self.enc.remove_weight_norm() + def __prepare_scriptable__(self): + for hook in self.enc._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc) + return self + class Generator(torch.nn.Module): def __init__( @@ -252,7 +293,7 @@ class Generator(torch.nn.Module): if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - def forward(self, x, g=None): + def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None): x = self.conv_pre(x) if g is not None: x = x + self.cond(g) @@ -273,6 +314,28 @@ class Generator(torch.nn.Module): return x + def __prepare_scriptable__(self): + for l in self.ups: + for hook in l._forward_pre_hooks.values(): + # The hook we want to remove is an instance of WeightNorm class, so + # normally we would do `if isinstance(...)` but this class is not accessible + # because of shadowing, so we check the module name directly. 
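The hook scan that starts here is repeated in every __prepare_scriptable__ this patch adds: weight_norm keeps a WeightNorm forward-pre-hook that torch.jit.script cannot serialize, so the normalized weight is materialized and the hook removed right before scripting. A stand-alone sketch of the same pattern on a plain Conv1d (illustration only, not part of the patch):

    import torch
    from torch import nn

    conv = nn.utils.weight_norm(nn.Conv1d(4, 4, 3))

    # weight_norm registers a forward pre-hook that rebuilds `weight` from
    # weight_g / weight_v on every call; scripting cannot carry that hook.
    for hook in list(conv._forward_pre_hooks.values()):
        if (
            hook.__module__ == "torch.nn.utils.weight_norm"
            and hook.__class__.__name__ == "WeightNorm"
        ):
            torch.nn.utils.remove_weight_norm(conv)

    scripted = torch.jit.script(conv)  # scripts cleanly once the hook is gone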
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + + for l in self.resblocks: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + return self + def remove_weight_norm(self): for l in self.ups: remove_weight_norm(l) @@ -293,7 +356,7 @@ class SineGen(torch.nn.Module): voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) + segment is always sin(torch.pi) or cos(0) """ def __init__( @@ -321,7 +384,7 @@ class SineGen(torch.nn.Module): uv = uv.float() return uv - def forward(self, f0, upp): + def forward(self, f0: torch.Tensor, upp: int): """sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 @@ -333,7 +396,7 @@ class SineGen(torch.nn.Module): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) # fundamental component f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): + for idx in range(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( idx + 2 ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic @@ -347,12 +410,12 @@ class SineGen(torch.nn.Module): tmp_over_one *= upp tmp_over_one = F.interpolate( tmp_over_one.transpose(2, 1), - scale_factor=upp, + scale_factor=float(upp), mode="linear", align_corners=True, ).transpose(2, 1) rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" + rad_values.transpose(2, 1), scale_factor=float(upp), mode="nearest" ).transpose( 2, 1 ) ####### @@ -361,12 +424,12 @@ class SineGen(torch.nn.Module): cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi + torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * torch.pi ) sine_waves = sine_waves * self.sine_amp uv = self._f02uv(f0) uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" + uv.transpose(2, 1), scale_factor=float(upp), mode="nearest" ).transpose(2, 1) noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) @@ -414,18 +477,19 @@ class SourceModuleHnNSF(torch.nn.Module): # to merge source harmonics into a single excitation self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) self.l_tanh = torch.nn.Tanh() + # self.ddtype:int = -1 - def forward(self, x, upp=None): - if hasattr(self, "ddtype") == False: - self.ddtype = self.l_linear.weight.dtype + def forward(self, x: torch.Tensor, upp: int = 1): + # if self.ddtype ==-1: + # self.ddtype = self.l_linear.weight.dtype sine_wavs, uv, _ = self.l_sin_gen(x, upp) # print(x.dtype,sine_wavs.dtype,self.l_linear.weight.dtype) # if self.is_half: # sine_wavs = sine_wavs.half() # sine_merge = self.l_tanh(self.l_linear(sine_wavs.to(x))) # print(sine_wavs.dtype,self.ddtype) - if sine_wavs.dtype != self.ddtype: - sine_wavs = sine_wavs.to(self.ddtype) + # if sine_wavs.dtype != self.l_linear.weight.dtype: + sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype) sine_merge = 
self.l_tanh(self.l_linear(sine_wavs)) return sine_merge, None, None # noise, uv @@ -448,7 +512,7 @@ class GeneratorNSF(torch.nn.Module): self.num_kernels = len(resblock_kernel_sizes) self.num_upsamples = len(upsample_rates) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates)) self.m_source = SourceModuleHnNSF( sampling_rate=sr, harmonic_num=0, is_half=is_half ) @@ -473,7 +537,7 @@ class GeneratorNSF(torch.nn.Module): ) ) if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) + stride_f0 = math.prod(upsample_rates[i + 1 :]) self.noise_convs.append( Conv1d( 1, @@ -500,27 +564,36 @@ class GeneratorNSF(torch.nn.Module): if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - self.upp = np.prod(upsample_rates) + self.upp = math.prod(upsample_rates) - def forward(self, x, f0, g=None): + self.lrelu_slope = modules.LRELU_SLOPE + + def forward(self, x, f0, g: Optional[torch.Tensor] = None): har_source, noi_source, uv = self.m_source(f0, self.upp) har_source = har_source.transpose(1, 2) x = self.conv_pre(x) if g is not None: x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels + # torch.jit.script() does not support direct indexing of torch modules + # That's why I wrote this + for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)): + if i < self.num_upsamples: + x = F.leaky_relu(x, self.lrelu_slope) + x = ups(x) + x_source = noise_convs(har_source) + x = x + x_source + xs: Optional[torch.Tensor] = None + l = [i * self.num_kernels + j for j in range(self.num_kernels)] + for j, resblock in enumerate(self.resblocks): + if j in l: + if xs is None: + xs = resblock(x) + else: + xs += resblock(x) + # This assertion cannot be ignored! \ + # If ignored, it will cause torch.jit.script() compilation errors + assert isinstance(xs, torch.Tensor) + x = xs / self.num_kernels x = F.leaky_relu(x) x = self.conv_post(x) x = torch.tanh(x) @@ -532,6 +605,27 @@ class GeneratorNSF(torch.nn.Module): for l in self.resblocks: l.remove_weight_norm() + def __prepare_scriptable__(self): + for l in self.ups: + for hook in l._forward_pre_hooks.values(): + # The hook we want to remove is an instance of WeightNorm class, so + # normally we would do `if isinstance(...)` but this class is not accessible + # because of shadowing, so we check the module name directly. 
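As the in-line comment above explains, the upsampling loop is rewritten to iterate the parallel ModuleLists directly instead of indexing self.ups[i], which is the form torch.jit.script accepts. A reduced sketch of that structure with hypothetical layer sizes:

    import torch
    import torch.nn.functional as F
    from torch import nn

    class TinyStack(nn.Module):
        def __init__(self):
            super().__init__()
            self.ups = nn.ModuleList([nn.Conv1d(8, 8, 1) for _ in range(2)])
            self.noise_convs = nn.ModuleList([nn.Conv1d(8, 8, 1) for _ in range(2)])

        def forward(self, x: torch.Tensor, har: torch.Tensor) -> torch.Tensor:
            # zip over the parallel ModuleLists rather than index them,
            # mirroring the scripted GeneratorNSF.forward in this patch
            for ups, noise_conv in zip(self.ups, self.noise_convs):
                x = F.leaky_relu(ups(x), 0.1) + noise_conv(har)
            return x

    scripted = torch.jit.script(TinyStack())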
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + for l in self.resblocks: + for hook in self.resblocks._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + return self + sr2sr = { "32k": 32000, @@ -563,8 +657,8 @@ class SynthesizerTrnMs256NSFsid(nn.Module): sr, **kwargs ): - super().__init__() - if type(sr) == type("strr"): + super(SynthesizerTrnMs256NSFsid, self).__init__() + if isinstance(sr, str): sr = sr2sr[sr] self.spec_channels = spec_channels self.inter_channels = inter_channels @@ -573,7 +667,7 @@ class SynthesizerTrnMs256NSFsid(nn.Module): self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.resblock = resblock self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes @@ -591,7 +685,7 @@ class SynthesizerTrnMs256NSFsid(nn.Module): n_heads, n_layers, kernel_size, - p_dropout, + float(p_dropout), ) self.dec = GeneratorNSF( inter_channels, @@ -630,8 +724,42 @@ class SynthesizerTrnMs256NSFsid(nn.Module): self.flow.remove_weight_norm() self.enc_q.remove_weight_norm() + def __prepare_scriptable__(self): + for hook in self.dec._forward_pre_hooks.values(): + # The hook we want to remove is an instance of WeightNorm class, so + # normally we would do `if isinstance(...)` but this class is not accessible + # because of shadowing, so we check the module name directly. + # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.dec) + for hook in self.flow._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.flow) + if hasattr(self, "enc_q"): + for hook in self.enc_q._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc_q) + return self + + @torch.jit.ignore def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds + self, + phone: torch.Tensor, + phone_lengths: torch.Tensor, + pitch: torch.Tensor, + pitchf: torch.Tensor, + y: torch.Tensor, + y_lengths: torch.Tensor, + ds: Optional[torch.Tensor] = None, ): # 这里ds是id,[bs,1] # print(1,pitch.shape)#[bs,t] g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 @@ -647,15 +775,25 @@ class SynthesizerTrnMs256NSFsid(nn.Module): o = self.dec(z_slice, pitchf, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + @torch.jit.export + def infer( + self, + phone: torch.Tensor, + phone_lengths: torch.Tensor, + pitch: torch.Tensor, + nsff0: torch.Tensor, + sid: torch.Tensor, + rate: Optional[torch.Tensor] = None, + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] 
* rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] + if rate is not None: + assert isinstance(rate, torch.Tensor) + head = int(z_p.shape[2] * (1 - rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, nsff0, g=g) return o, x_mask, (z, z_p, m_p, logs_p) @@ -684,8 +822,8 @@ class SynthesizerTrnMs768NSFsid(nn.Module): sr, **kwargs ): - super().__init__() - if type(sr) == type("strr"): + super(SynthesizerTrnMs768NSFsid, self).__init__() + if isinstance(sr, str): sr = sr2sr[sr] self.spec_channels = spec_channels self.inter_channels = inter_channels @@ -694,7 +832,7 @@ class SynthesizerTrnMs768NSFsid(nn.Module): self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.resblock = resblock self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes @@ -712,7 +850,7 @@ class SynthesizerTrnMs768NSFsid(nn.Module): n_heads, n_layers, kernel_size, - p_dropout, + float(p_dropout), ) self.dec = GeneratorNSF( inter_channels, @@ -751,6 +889,33 @@ class SynthesizerTrnMs768NSFsid(nn.Module): self.flow.remove_weight_norm() self.enc_q.remove_weight_norm() + def __prepare_scriptable__(self): + for hook in self.dec._forward_pre_hooks.values(): + # The hook we want to remove is an instance of WeightNorm class, so + # normally we would do `if isinstance(...)` but this class is not accessible + # because of shadowing, so we check the module name directly. + # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.dec) + for hook in self.flow._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.flow) + if hasattr(self, "enc_q"): + for hook in self.enc_q._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc_q) + return self + + @torch.jit.ignore def forward( self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds ): # 这里ds是id,[bs,1] @@ -768,15 +933,24 @@ class SynthesizerTrnMs768NSFsid(nn.Module): o = self.dec(z_slice, pitchf, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + @torch.jit.export + def infer( + self, + phone: torch.Tensor, + phone_lengths: torch.Tensor, + pitch: torch.Tensor, + nsff0: torch.Tensor, + sid: torch.Tensor, + rate: Optional[torch.Tensor] = None, + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] + if rate is not None: + head = int(z_p.shape[2] * (1.0 - rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, nsff0, g=g) return o, x_mask, (z, z_p, m_p, logs_p) @@ -805,7 +979,7 @@ class 
SynthesizerTrnMs256NSFsid_nono(nn.Module): sr=None, **kwargs ): - super().__init__() + super(SynthesizerTrnMs256NSFsid_nono, self).__init__() self.spec_channels = spec_channels self.inter_channels = inter_channels self.hidden_channels = hidden_channels @@ -813,7 +987,7 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module): self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.resblock = resblock self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes @@ -831,7 +1005,7 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module): n_heads, n_layers, kernel_size, - p_dropout, + float(p_dropout), f0=False, ) self.dec = Generator( @@ -869,6 +1043,33 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module): self.flow.remove_weight_norm() self.enc_q.remove_weight_norm() + def __prepare_scriptable__(self): + for hook in self.dec._forward_pre_hooks.values(): + # The hook we want to remove is an instance of WeightNorm class, so + # normally we would do `if isinstance(...)` but this class is not accessible + # because of shadowing, so we check the module name directly. + # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.dec) + for hook in self.flow._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.flow) + if hasattr(self, "enc_q"): + for hook in self.enc_q._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc_q) + return self + + @torch.jit.ignore def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) @@ -880,14 +1081,22 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module): o = self.dec(z_slice, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, sid, rate=None): + @torch.jit.export + def infer( + self, + phone: torch.Tensor, + phone_lengths: torch.Tensor, + sid: torch.Tensor, + rate: Optional[torch.Tensor] = None, + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] + if rate is not None: + head = int(z_p.shape[2] * (1.0 - rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, g=g) return o, x_mask, (z, z_p, m_p, logs_p) @@ -916,7 +1125,7 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module): sr=None, **kwargs ): - super().__init__() + super(self, SynthesizerTrnMs768NSFsid_nono).__init__() self.spec_channels = spec_channels self.inter_channels = inter_channels self.hidden_channels = hidden_channels @@ -924,7 +1133,7 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module): self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = 
float(p_dropout) self.resblock = resblock self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes @@ -942,7 +1151,7 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module): n_heads, n_layers, kernel_size, - p_dropout, + float(p_dropout), f0=False, ) self.dec = Generator( @@ -980,6 +1189,33 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module): self.flow.remove_weight_norm() self.enc_q.remove_weight_norm() + def __prepare_scriptable__(self): + for hook in self.dec._forward_pre_hooks.values(): + # The hook we want to remove is an instance of WeightNorm class, so + # normally we would do `if isinstance(...)` but this class is not accessible + # because of shadowing, so we check the module name directly. + # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.dec) + for hook in self.flow._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.flow) + if hasattr(self, "enc_q"): + for hook in self.enc_q._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc_q) + return self + + @torch.jit.ignore def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) @@ -991,14 +1227,22 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module): o = self.dec(z_slice, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, sid, rate=None): + @torch.jit.export + def infer( + self, + phone: torch.Tensor, + phone_lengths: torch.Tensor, + sid: torch.Tensor, + rate: Optional[torch.Tensor] = None, + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] + if rate is not None: + head = int(z_p.shape[2] * (1.0 - rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, g=g) return o, x_mask, (z, z_p, m_p, logs_p) diff --git a/infer/lib/infer_pack/models_onnx.py b/infer/lib/infer_pack/models_onnx.py index 3e99763..ff60414 100644 --- a/infer/lib/infer_pack/models_onnx.py +++ b/infer/lib/infer_pack/models_onnx.py @@ -551,7 +551,7 @@ class SynthesizerTrnMsNSFsidM(nn.Module): gin_channels, sr, version, - **kwargs + **kwargs, ): super().__init__() if type(sr) == type("strr"): @@ -621,10 +621,7 @@ class SynthesizerTrnMsNSFsidM(nn.Module): self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) self.speaker_map = None logger.debug( - "gin_channels: " - + gin_channels - + ", self.spk_embed_dim: " - + self.spk_embed_dim + f"gin_channels: {gin_channels}, self.spk_embed_dim: {self.spk_embed_dim}" ) def remove_weight_norm(self): diff --git a/infer/lib/infer_pack/modules.py b/infer/lib/infer_pack/modules.py index edf2207..51aeaf0 100644 --- a/infer/lib/infer_pack/modules.py +++ b/infer/lib/infer_pack/modules.py @@ -1,5 +1,6 @@ 
import copy import math +from typing import Optional, Tuple import numpy as np import scipy @@ -18,7 +19,7 @@ LRELU_SLOPE = 0.1 class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-5): - super().__init__() + super(LayerNorm, self).__init__() self.channels = channels self.eps = eps @@ -41,13 +42,13 @@ class ConvReluNorm(nn.Module): n_layers, p_dropout, ): - super().__init__() + super(ConvReluNorm, self).__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.out_channels = out_channels self.kernel_size = kernel_size self.n_layers = n_layers - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) assert n_layers > 1, "Number of layers should be larger than 0." self.conv_layers = nn.ModuleList() @@ -58,7 +59,7 @@ class ConvReluNorm(nn.Module): ) ) self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(float(p_dropout))) for _ in range(n_layers - 1): self.conv_layers.append( nn.Conv1d( @@ -89,13 +90,13 @@ class DDSConv(nn.Module): """ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() + super(DDSConv, self).__init__() self.channels = channels self.kernel_size = kernel_size self.n_layers = n_layers - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) - self.drop = nn.Dropout(p_dropout) + self.drop = nn.Dropout(float(p_dropout)) self.convs_sep = nn.ModuleList() self.convs_1x1 = nn.ModuleList() self.norms_1 = nn.ModuleList() @@ -117,7 +118,7 @@ class DDSConv(nn.Module): self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) - def forward(self, x, x_mask, g=None): + def forward(self, x, x_mask, g: Optional[torch.Tensor] = None): if g is not None: x = x + g for i in range(self.n_layers): @@ -149,11 +150,11 @@ class WN(torch.nn.Module): self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) + self.drop = nn.Dropout(float(p_dropout)) if gin_channels != 0: cond_layer = torch.nn.Conv1d( @@ -184,15 +185,19 @@ class WN(torch.nn.Module): res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) - def forward(self, x, x_mask, g=None, **kwargs): + def forward( + self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None + ): output = torch.zeros_like(x) n_channels_tensor = torch.IntTensor([self.hidden_channels]) if g is not None: g = self.cond_layer(g) - for i in range(self.n_layers): - x_in = self.in_layers[i](x) + for i, (in_layer, res_skip_layer) in enumerate( + zip(self.in_layers, self.res_skip_layers) + ): + x_in = in_layer(x) if g is not None: cond_offset = i * 2 * self.hidden_channels g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] @@ -202,7 +207,7 @@ class WN(torch.nn.Module): acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) acts = self.drop(acts) - res_skip_acts = self.res_skip_layers[i](acts) + res_skip_acts = res_skip_layer(acts) if i < self.n_layers - 1: res_acts = res_skip_acts[:, : self.hidden_channels, :] x = (x + res_acts) * x_mask @@ -219,6 +224,30 @@ class WN(torch.nn.Module): for l in self.res_skip_layers: torch.nn.utils.remove_weight_norm(l) + def __prepare_scriptable__(self): + if self.gin_channels != 
0: + for hook in self.cond_layer._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + return self + class ResBlock1(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): @@ -294,14 +323,15 @@ class ResBlock1(torch.nn.Module): ] ) self.convs2.apply(init_weights) + self.lrelu_slope = LRELU_SLOPE - def forward(self, x, x_mask=None): + def forward(self, x: torch.Tensor, x_mask: Optional[torch.Tensor] = None): for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) + xt = F.leaky_relu(x, self.lrelu_slope) if x_mask is not None: xt = xt * x_mask xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) + xt = F.leaky_relu(xt, self.lrelu_slope) if x_mask is not None: xt = xt * x_mask xt = c2(xt) @@ -316,6 +346,23 @@ class ResBlock1(torch.nn.Module): for l in self.convs2: remove_weight_norm(l) + def __prepare_scriptable__(self): + for l in self.convs1: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + for l in self.convs2: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + return self + class ResBlock2(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3)): @@ -345,10 +392,11 @@ class ResBlock2(torch.nn.Module): ] ) self.convs.apply(init_weights) + self.lrelu_slope = LRELU_SLOPE - def forward(self, x, x_mask=None): + def forward(self, x, x_mask: Optional[torch.Tensor] = None): for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) + xt = F.leaky_relu(x, self.lrelu_slope) if x_mask is not None: xt = xt * x_mask xt = c(xt) @@ -361,9 +409,25 @@ class ResBlock2(torch.nn.Module): for l in self.convs: remove_weight_norm(l) + def __prepare_scriptable__(self): + for l in self.convs: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + return self + class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if not reverse: y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask logdet = torch.sum(-y, [1, 2]) @@ -374,18 +438,27 @@ class Log(nn.Module): class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): + # torch.jit.script() Compiled functions \ + # can't take variable number of arguments or \ + # use keyword-only arguments with defaults + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: x = torch.flip(x, 
[1]) if not reverse: logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) return x, logdet else: - return x + return x, torch.zeros([1], device=x.device) class ElementwiseAffine(nn.Module): def __init__(self, channels): - super().__init__() + super(ElementwiseAffine, self).__init__() self.channels = channels self.m = nn.Parameter(torch.zeros(channels, 1)) self.logs = nn.Parameter(torch.zeros(channels, 1)) @@ -414,7 +487,7 @@ class ResidualCouplingLayer(nn.Module): mean_only=False, ): assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() + super(ResidualCouplingLayer, self).__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size @@ -429,14 +502,20 @@ class ResidualCouplingLayer(nn.Module): kernel_size, dilation_rate, n_layers, - p_dropout=p_dropout, + p_dropout=float(p_dropout), gin_channels=gin_channels, ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() - def forward(self, x, x_mask, g=None, reverse=False): + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) * x_mask h = self.enc(h, x_mask, g=g) @@ -455,11 +534,20 @@ class ResidualCouplingLayer(nn.Module): else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) - return x + return x, torch.zeros([1]) def remove_weight_norm(self): self.enc.remove_weight_norm() + def __prepare_scriptable__(self): + for hook in self.enc._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc) + return self + class ConvFlow(nn.Module): def __init__( @@ -471,7 +559,7 @@ class ConvFlow(nn.Module): num_bins=10, tail_bound=5.0, ): - super().__init__() + super(ConvFlow, self).__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size @@ -488,7 +576,13 @@ class ConvFlow(nn.Module): self.proj.weight.data.zero_() self.proj.bias.data.zero_() - def forward(self, x, x_mask, g=None, reverse=False): + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse=False, + ): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) h = self.convs(h, x_mask, g=g) diff --git a/infer/lib/jit/__init__.py b/infer/lib/jit/__init__.py new file mode 100644 index 0000000..d7f41dd --- /dev/null +++ b/infer/lib/jit/__init__.py @@ -0,0 +1,163 @@ +from io import BytesIO +import pickle +import time +import torch +from tqdm import tqdm +from collections import OrderedDict + + +def load_inputs(path, device, is_half=False): + parm = torch.load(path, map_location=torch.device("cpu")) + for key in parm.keys(): + parm[key] = parm[key].to(device) + if is_half and parm[key].dtype == torch.float32: + parm[key] = parm[key].half() + elif not is_half and parm[key].dtype == torch.float16: + parm[key] = parm[key].float() + return parm + + +def benchmark( + model, inputs_path, device=torch.device("cpu"), epoch=1000, is_half=False +): + parm = load_inputs(inputs_path, device, is_half) + total_ts = 0.0 + bar = tqdm(range(epoch)) + for i in bar: + start_time = time.perf_counter() + o = model(**parm) + total_ts += time.perf_counter() - start_time + print(f"num_epoch: {epoch} | avg time(ms): {(total_ts*1000)/epoch}") + + +def 
jit_warm_up(model, inputs_path, device=torch.device("cpu"), epoch=5, is_half=False): + benchmark(model, inputs_path, device, epoch=epoch, is_half=is_half) + + +def to_jit_model( + model_path, + model_type: str, + mode: str = "trace", + inputs_path: str = None, + device=torch.device("cpu"), + is_half=False, +): + model = None + if model_type.lower() == "synthesizer": + from .get_synthesizer import get_synthesizer + + model, _ = get_synthesizer(model_path, device) + model.forward = model.infer + elif model_type.lower() == "rmvpe": + from .get_rmvpe import get_rmvpe + + model = get_rmvpe(model_path, device) + elif model_type.lower() == "hubert": + from .get_hubert import get_hubert_model + + model = get_hubert_model(model_path, device) + model.forward = model.infer + else: + raise ValueError(f"No model type named {model_type}") + model = model.eval() + model = model.half() if is_half else model.float() + if mode == "trace": + assert not inputs_path + inputs = load_inputs(inputs_path, device, is_half) + model_jit = torch.jit.trace(model, example_kwarg_inputs=inputs) + elif mode == "script": + model_jit = torch.jit.script(model) + model_jit.to(device) + model_jit = model_jit.half() if is_half else model_jit.float() + # model = model.half() if is_half else model.float() + return (model, model_jit) + + +def export( + model: torch.nn.Module, + mode: str = "trace", + inputs: dict = None, + device=torch.device("cpu"), + is_half: bool = False, +) -> dict: + model = model.half() if is_half else model.float() + model.eval() + if mode == "trace": + assert inputs is not None + model_jit = torch.jit.trace(model, example_kwarg_inputs=inputs) + elif mode == "script": + model_jit = torch.jit.script(model) + model_jit.to(device) + model_jit = model_jit.half() if is_half else model_jit.float() + buffer = BytesIO() + # model_jit=model_jit.cpu() + torch.jit.save(model_jit, buffer) + del model_jit + cpt = OrderedDict() + cpt["model"] = buffer.getvalue() + cpt["is_half"] = is_half + return cpt + + +def load(path: str): + with open(path, "rb") as f: + return pickle.load(f) + + +def save(ckpt: dict, save_path: str): + with open(save_path, "wb") as f: + pickle.dump(ckpt, f) + + +def rmvpe_jit_export( + model_path: str, + mode: str = "script", + inputs_path: str = None, + save_path: str = None, + device=torch.device("cpu"), + is_half=False, +): + if not save_path: + save_path = model_path.rstrip(".pth") + save_path += ".half.jit" if is_half else ".jit" + if "cuda" in str(device) and ":" not in str(device): + device = torch.device("cuda:0") + from .get_rmvpe import get_rmvpe + + model = get_rmvpe(model_path, device) + inputs = None + if mode == "trace": + inputs = load_inputs(inputs_path, device, is_half) + ckpt = export(model, mode, inputs, device, is_half) + ckpt["device"] = str(device) + save(ckpt, save_path) + return ckpt + + +def synthesizer_jit_export( + model_path: str, + mode: str = "script", + inputs_path: str = None, + save_path: str = None, + device=torch.device("cpu"), + is_half=False, +): + if not save_path: + save_path = model_path.rstrip(".pth") + save_path += ".half.jit" if is_half else ".jit" + if "cuda" in str(device) and ":" not in str(device): + device = torch.device("cuda:0") + from .get_synthesizer import get_synthesizer + + model, cpt = get_synthesizer(model_path, device) + assert isinstance(cpt, dict) + model.forward = model.infer + inputs = None + if mode == "trace": + inputs = load_inputs(inputs_path, device, is_half) + ckpt = export(model, mode, inputs, device, is_half) + cpt.pop("weight") + 
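
The helpers above serialize a scripted (or traced) module into an in-memory TorchScript archive and wrap it, together with metadata, in a plain pickled dict; `torch.jit.script` in turn relies on each submodule's `__prepare_scriptable__` (as added to WN, the ResBlocks and ResidualCouplingLayer earlier in this patch) to strip the weight_norm hooks it cannot compile. A minimal, self-contained sketch of that flow — the `Tiny` module and the `demo.jit` file name are illustrative only, not part of this patch:

    from io import BytesIO
    import pickle

    import torch
    from torch import nn


    class Tiny(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.utils.weight_norm(nn.Conv1d(4, 4, 3, padding=1))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.conv(x)

        def __prepare_scriptable__(self):
            # Same idea as the hunks above: drop the WeightNorm forward-pre-hook,
            # which TorchScript cannot compile. Iterating over a snapshot (list)
            # keeps the loop safe while the hook dict is mutated.
            for hook in list(self.conv._forward_pre_hooks.values()):
                if (
                    hook.__module__ == "torch.nn.utils.weight_norm"
                    and hook.__class__.__name__ == "WeightNorm"
                ):
                    torch.nn.utils.remove_weight_norm(self.conv)
            return self


    scripted = torch.jit.script(Tiny())      # invokes __prepare_scriptable__ first

    buffer = BytesIO()
    torch.jit.save(scripted, buffer)         # TorchScript archive kept in memory

    ckpt = {"model": buffer.getvalue(), "is_half": False, "device": "cpu"}
    with open("demo.jit", "wb") as f:        # same pickle-based save() as above
        pickle.dump(ckpt, f)

    with open("demo.jit", "rb") as f:        # same pickle-based load() as above
        restored = pickle.load(f)
    net = torch.jit.load(BytesIO(restored["model"]), map_location="cpu")
    print(net(torch.randn(1, 4, 8)).shape)   # torch.Size([1, 4, 8])
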
cpt["model"] = ckpt["model"] + cpt["device"] = device + save(cpt, save_path) + return cpt diff --git a/infer/lib/jit/get_hubert.py b/infer/lib/jit/get_hubert.py new file mode 100644 index 0000000..aec7132 --- /dev/null +++ b/infer/lib/jit/get_hubert.py @@ -0,0 +1,342 @@ +import math +import random +from typing import Optional, Tuple +from fairseq.checkpoint_utils import load_model_ensemble_and_task +import numpy as np +import torch +import torch.nn.functional as F + +# from fairseq.data.data_utils import compute_mask_indices +from fairseq.utils import index_put + + +# @torch.jit.script +def pad_to_multiple(x, multiple, dim=-1, value=0): + # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41 + if x is None: + return None, 0 + tsz = x.size(dim) + m = tsz / multiple + remainder = math.ceil(m) * multiple - tsz + if int(tsz % multiple) == 0: + return x, 0 + pad_offset = (0,) * (-1 - dim) * 2 + + return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder + + +def extract_features( + self, + x, + padding_mask=None, + tgt_layer=None, + min_layer=0, +): + if padding_mask is not None: + x = index_put(x, padding_mask, 0) + + x_conv = self.pos_conv(x.transpose(1, 2)) + x_conv = x_conv.transpose(1, 2) + x = x + x_conv + + if not self.layer_norm_first: + x = self.layer_norm(x) + + # pad to the sequence length dimension + x, pad_length = pad_to_multiple(x, self.required_seq_len_multiple, dim=-2, value=0) + if pad_length > 0 and padding_mask is None: + padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool) + padding_mask[:, -pad_length:] = True + else: + padding_mask, _ = pad_to_multiple( + padding_mask, self.required_seq_len_multiple, dim=-1, value=True + ) + x = F.dropout(x, p=self.dropout, training=self.training) + + # B x T x C -> T x B x C + x = x.transpose(0, 1) + + layer_results = [] + r = None + for i, layer in enumerate(self.layers): + dropout_probability = np.random.random() if self.layerdrop > 0 else 1 + if not self.training or (dropout_probability > self.layerdrop): + x, (z, lr) = layer( + x, self_attn_padding_mask=padding_mask, need_weights=False + ) + if i >= min_layer: + layer_results.append((x, z, lr)) + if i == tgt_layer: + r = x + break + + if r is not None: + x = r + + # T x B x C -> B x T x C + x = x.transpose(0, 1) + + # undo paddding + if pad_length > 0: + x = x[:, :-pad_length] + + def undo_pad(a, b, c): + return ( + a[:-pad_length], + b[:-pad_length] if b is not None else b, + c[:-pad_length], + ) + + layer_results = [undo_pad(*u) for u in layer_results] + + return x, layer_results + + +def compute_mask_indices( + shape: Tuple[int, int], + padding_mask: Optional[torch.Tensor], + mask_prob: float, + mask_length: int, + mask_type: str = "static", + mask_other: float = 0.0, + min_masks: int = 0, + no_overlap: bool = False, + min_space: int = 0, + require_same_masks: bool = True, + mask_dropout: float = 0.0, +) -> torch.Tensor: + """ + Computes random mask spans for a given shape + + Args: + shape: the the shape for which to compute masks. + should be of size 2 where first element is batch size and 2nd is timesteps + padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements + mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by + number of timesteps divided by length of mask span to mask approximately this percentage of all elements. 
+ however due to overlaps, the actual number will be smaller (unless no_overlap is True) + mask_type: how to compute mask lengths + static = fixed size + uniform = sample from uniform distribution [mask_other, mask_length*2] + normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element + poisson = sample from possion distribution with lambda = mask length + min_masks: minimum number of masked spans + no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping + min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans + require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample + mask_dropout: randomly dropout this percentage of masks in each example + """ + + bsz, all_sz = shape + mask = torch.full((bsz, all_sz), False) + + all_num_mask = int( + # add a random number for probabilistic rounding + mask_prob * all_sz / float(mask_length) + + torch.rand([1]).item() + ) + + all_num_mask = max(min_masks, all_num_mask) + + mask_idcs = [] + for i in range(bsz): + if padding_mask is not None: + sz = all_sz - padding_mask[i].long().sum().item() + num_mask = int(mask_prob * sz / float(mask_length) + np.random.rand()) + num_mask = max(min_masks, num_mask) + else: + sz = all_sz + num_mask = all_num_mask + + if mask_type == "static": + lengths = torch.full([num_mask], mask_length) + elif mask_type == "uniform": + lengths = torch.randint(mask_other, mask_length * 2 + 1, size=[num_mask]) + elif mask_type == "normal": + lengths = torch.normal(mask_length, mask_other, size=[num_mask]) + lengths = [max(1, int(round(x))) for x in lengths] + else: + raise Exception("unknown mask selection " + mask_type) + + if sum(lengths) == 0: + lengths[0] = min(mask_length, sz - 1) + + if no_overlap: + mask_idc = [] + + def arrange(s, e, length, keep_length): + span_start = torch.randint(low=s, high=e - length, size=[1]).item() + mask_idc.extend(span_start + i for i in range(length)) + + new_parts = [] + if span_start - s - min_space >= keep_length: + new_parts.append((s, span_start - min_space + 1)) + if e - span_start - length - min_space > keep_length: + new_parts.append((span_start + length + min_space, e)) + return new_parts + + parts = [(0, sz)] + min_length = min(lengths) + for length in sorted(lengths, reverse=True): + t = [e - s if e - s >= length + min_space else 0 for s, e in parts] + lens = torch.asarray(t, dtype=torch.int) + l_sum = torch.sum(lens) + if l_sum == 0: + break + probs = lens / torch.sum(lens) + c = torch.multinomial(probs.float(), len(parts)).item() + s, e = parts.pop(c) + parts.extend(arrange(s, e, length, min_length)) + mask_idc = torch.asarray(mask_idc) + else: + min_len = min(lengths) + if sz - min_len <= num_mask: + min_len = sz - num_mask - 1 + mask_idc = torch.asarray( + random.sample([i for i in range(sz - min_len)], num_mask) + ) + mask_idc = torch.asarray( + [ + mask_idc[j] + offset + for j in range(len(mask_idc)) + for offset in range(lengths[j]) + ] + ) + + mask_idcs.append(torch.unique(mask_idc[mask_idc < sz])) + + min_len = min([len(m) for m in mask_idcs]) + for i, mask_idc in enumerate(mask_idcs): + if isinstance(mask_idc, torch.Tensor): + mask_idc = torch.asarray(mask_idc, dtype=torch.float) + if len(mask_idc) > min_len and require_same_masks: + mask_idc = torch.asarray( + random.sample([i for i in range(mask_idc)], min_len) + ) + if mask_dropout > 0: + num_holes = int(round(len(mask_idc) * 
mask_dropout)) + mask_idc = torch.asarray( + random.sample([i for i in range(mask_idc)], len(mask_idc) - num_holes) + ) + + mask[i, mask_idc.int()] = True + + return mask + + +def apply_mask(self, x, padding_mask, target_list): + B, T, C = x.shape + torch.zeros_like(x) + if self.mask_prob > 0: + mask_indices = compute_mask_indices( + (B, T), + padding_mask, + self.mask_prob, + self.mask_length, + self.mask_selection, + self.mask_other, + min_masks=2, + no_overlap=self.no_mask_overlap, + min_space=self.mask_min_space, + ) + mask_indices = mask_indices.to(x.device) + x[mask_indices] = self.mask_emb + else: + mask_indices = None + + if self.mask_channel_prob > 0: + mask_channel_indices = compute_mask_indices( + (B, C), + None, + self.mask_channel_prob, + self.mask_channel_length, + self.mask_channel_selection, + self.mask_channel_other, + no_overlap=self.no_mask_channel_overlap, + min_space=self.mask_channel_min_space, + ) + mask_channel_indices = ( + mask_channel_indices.to(x.device).unsqueeze(1).expand(-1, T, -1) + ) + x[mask_channel_indices] = 0 + + return x, mask_indices + + +def get_hubert_model( + model_path="assets/hubert/hubert_base.pt", device=torch.device("cpu") +): + models, _, _ = load_model_ensemble_and_task( + [model_path], + suffix="", + ) + hubert_model = models[0] + hubert_model = hubert_model.to(device) + + def _apply_mask(x, padding_mask, target_list): + return apply_mask(hubert_model, x, padding_mask, target_list) + + hubert_model.apply_mask = _apply_mask + + def _extract_features( + x, + padding_mask=None, + tgt_layer=None, + min_layer=0, + ): + return extract_features( + hubert_model.encoder, + x, + padding_mask=padding_mask, + tgt_layer=tgt_layer, + min_layer=min_layer, + ) + + hubert_model.encoder.extract_features = _extract_features + + hubert_model._forward = hubert_model.forward + + def hubert_extract_features( + self, + source: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + mask: bool = False, + ret_conv: bool = False, + output_layer: Optional[int] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + res = self._forward( + source, + padding_mask=padding_mask, + mask=mask, + features_only=True, + output_layer=output_layer, + ) + feature = res["features"] if ret_conv else res["x"] + return feature, res["padding_mask"] + + def _hubert_extract_features( + source: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + mask: bool = False, + ret_conv: bool = False, + output_layer: Optional[int] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + return hubert_extract_features( + hubert_model, source, padding_mask, mask, ret_conv, output_layer + ) + + hubert_model.extract_features = _hubert_extract_features + + def infer(source, padding_mask, output_layer: torch.Tensor): + output_layer = output_layer.item() + logits = hubert_model.extract_features( + source=source, padding_mask=padding_mask, output_layer=output_layer + ) + feats = hubert_model.final_proj(logits[0]) if output_layer == 9 else logits[0] + return feats + + hubert_model.infer = infer + # hubert_model.forward=infer + # hubert_model.forward + + return hubert_model diff --git a/infer/lib/jit/get_rmvpe.py b/infer/lib/jit/get_rmvpe.py new file mode 100644 index 0000000..e71c39f --- /dev/null +++ b/infer/lib/jit/get_rmvpe.py @@ -0,0 +1,12 @@ +import torch + + +def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt", device=torch.device("cpu")): + from infer.lib.rmvpe import E2E + + model = E2E(4, 1, (2, 2)) + ckpt = torch.load(model_path, map_location=device) + model.load_state_dict(ckpt) + 
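
get_hubert_model() above loads the fairseq HuBERT checkpoint and monkey-patches it so feature extraction is exposed through a fixed-signature infer(source, padding_mask, output_layer) that is friendlier to tracing and scripting. A hedged usage sketch, assuming fairseq is installed and assets/hubert/hubert_base.pt is present; shapes are approximate:

    import torch
    from infer.lib.jit.get_hubert import get_hubert_model

    device = torch.device("cpu")
    hubert = get_hubert_model("assets/hubert/hubert_base.pt", device).float().eval()

    wav = torch.randn(1, 16000)                             # ~1 s of 16 kHz audio
    padding_mask = torch.zeros_like(wav, dtype=torch.bool)  # nothing is padded

    with torch.no_grad():
        # output_layer is passed as a tensor so the call site stays traceable;
        # layer 9 + final_proj gives the 256-dim "v1" features, layer 12 the 768-dim "v2" ones
        feats = hubert.infer(wav, padding_mask, torch.LongTensor([9]))

    print(feats.shape)                                      # about (1, 50, 256)
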
model.eval() + model = model.to(device) + return model diff --git a/infer/lib/jit/get_synthesizer.py b/infer/lib/jit/get_synthesizer.py new file mode 100644 index 0000000..ef5fe58 --- /dev/null +++ b/infer/lib/jit/get_synthesizer.py @@ -0,0 +1,37 @@ +import torch + + +def get_synthesizer(pth_path, device=torch.device("cpu")): + from infer.lib.infer_pack.models import ( + SynthesizerTrnMs256NSFsid, + SynthesizerTrnMs256NSFsid_nono, + SynthesizerTrnMs768NSFsid, + SynthesizerTrnMs768NSFsid_nono, + ) + + cpt = torch.load(pth_path, map_location=torch.device("cpu")) + # tgt_sr = cpt["config"][-1] + cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] + if_f0 = cpt.get("f0", 1) + version = cpt.get("version", "v1") + if version == "v1": + if if_f0 == 1: + net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=False) + else: + net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) + elif version == "v2": + if if_f0 == 1: + net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=False) + else: + net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) + del net_g.enc_q + # net_g.forward = net_g.infer + # ckpt = {} + # ckpt["config"] = cpt["config"] + # ckpt["f0"] = if_f0 + # ckpt["version"] = version + # ckpt["info"] = cpt.get("info", "0epoch") + net_g.load_state_dict(cpt["weight"], strict=False) + net_g = net_g.float() + net_g.eval().to(device) + return net_g, cpt diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index d305b53..9010d28 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -1,8 +1,11 @@ -import pdb, os - +from io import BytesIO +import os +from typing import List, Optional, Tuple import numpy as np import torch +from infer.lib import jit + try: # Fix "Torch not compiled with CUDA enabled" import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import @@ -11,7 +14,7 @@ try: from infer.modules.ipex import ipex_init ipex_init() -except Exception: +except Exception: # pylint: disable=broad-exception-caught pass import torch.nn as nn import torch.nn.functional as F @@ -23,58 +26,6 @@ import logging logger = logging.getLogger(__name__) -###stft codes from https://github.com/pseeth/torch-stft/blob/master/torch_stft/util.py -def window_sumsquare( - window, - n_frames, - hop_length=200, - win_length=800, - n_fft=800, - dtype=np.float32, - norm=None, -): - """ - # from librosa 0.6 - Compute the sum-square envelope of a window function at a given hop length. - This is used to estimate modulation effects induced by windowing - observations in short-time fourier transforms. - Parameters - ---------- - window : string, tuple, number, callable, or list-like - Window specification, as in `get_window` - n_frames : int > 0 - The number of analysis frames - hop_length : int > 0 - The number of samples to advance between frames - win_length : [optional] - The length of the window function. By default, this matches `n_fft`. - n_fft : int > 0 - The length of each analysis frame. 
- dtype : np.dtype - The data type of the output - Returns - ------- - wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` - The sum-squared envelope of the window function - """ - if win_length is None: - win_length = n_fft - - n = n_fft + hop_length * (n_frames - 1) - x = np.zeros(n, dtype=dtype) - - # Compute the squared window at the desired length - win_sq = get_window(window, win_length, fftbins=True) - win_sq = normalize(win_sq, norm=norm) ** 2 - win_sq = pad_center(win_sq, n_fft) - - # Fill the envelope - for i in range(n_frames): - sample = i * hop_length - x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))] - return x - - class STFT(torch.nn.Module): def __init__( self, filter_length=1024, hop_length=512, win_length=None, window="hann" @@ -101,17 +52,14 @@ class STFT(torch.nn.Module): self.window = window self.forward_transform = None self.pad_amount = int(self.filter_length / 2) - scale = self.filter_length / self.hop_length fourier_basis = np.fft.fft(np.eye(self.filter_length)) cutoff = int((self.filter_length / 2 + 1)) fourier_basis = np.vstack( [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] ) - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :] - ) + forward_basis = torch.FloatTensor(fourier_basis) + inverse_basis = torch.FloatTensor(np.linalg.pinv(fourier_basis)) assert filter_length >= self.win_length # get window and zero center pad it to filter_length @@ -121,12 +69,13 @@ class STFT(torch.nn.Module): # window the bases forward_basis *= fft_window - inverse_basis *= fft_window + inverse_basis = (inverse_basis.T * fft_window).T self.register_buffer("forward_basis", forward_basis.float()) self.register_buffer("inverse_basis", inverse_basis.float()) + self.register_buffer("fft_window", fft_window.float()) - def transform(self, input_data): + def transform(self, input_data, return_phase=False): """Take input data (audio) to STFT domain. 
Arguments: @@ -138,33 +87,24 @@ class STFT(torch.nn.Module): phase {tensor} -- Phase of STFT with shape (num_batch, num_frequencies, num_frames) """ - num_batches = input_data.shape[0] - num_samples = input_data.shape[-1] - - self.num_samples = num_samples - - # similar to librosa, reflect-pad the input - input_data = input_data.view(num_batches, 1, num_samples) - # print(1234,input_data.shape) input_data = F.pad( - input_data.unsqueeze(1), - (self.pad_amount, self.pad_amount, 0, 0, 0, 0), + input_data, + (self.pad_amount, self.pad_amount), mode="reflect", - ).squeeze(1) - # print(2333,input_data.shape,self.forward_basis.shape,self.hop_length) - # pdb.set_trace() - forward_transform = F.conv1d( - input_data, self.forward_basis, stride=self.hop_length, padding=0 ) - + forward_transform = input_data.unfold( + 1, self.filter_length, self.hop_length + ).permute(0, 2, 1) + forward_transform = torch.matmul(self.forward_basis, forward_transform) cutoff = int((self.filter_length / 2) + 1) real_part = forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] - magnitude = torch.sqrt(real_part**2 + imag_part**2) - # phase = torch.atan2(imag_part.data, real_part.data) - - return magnitude # , phase + if return_phase: + phase = torch.atan2(imag_part.data, real_part.data) + return magnitude, phase + else: + return magnitude def inverse(self, magnitude, phase): """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced @@ -180,42 +120,25 @@ class STFT(torch.nn.Module): inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of shape (num_batch, num_samples) """ - recombine_magnitude_phase = torch.cat( + cat = torch.cat( [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 ) - - inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - self.inverse_basis, - stride=self.hop_length, - padding=0, + fold = torch.nn.Fold( + output_size=(1, (cat.size(-1) - 1) * self.hop_length + self.filter_length), + kernel_size=(1, self.filter_length), + stride=(1, self.hop_length), ) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, - magnitude.size(-1), - hop_length=self.hop_length, - win_length=self.win_length, - n_fft=self.filter_length, - dtype=np.float32, - ) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > tiny(window_sum))[0] - ) - window_sum = torch.from_numpy(window_sum).to(inverse_transform.device) - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ - approx_nonzero_indices - ] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[..., self.pad_amount :] - inverse_transform = inverse_transform[..., : self.num_samples] - inverse_transform = inverse_transform.squeeze(1) - + inverse_transform = torch.matmul(self.inverse_basis, cat) + inverse_transform = fold(inverse_transform)[ + :, 0, 0, self.pad_amount : -self.pad_amount + ] + window_square_sum = ( + self.fft_window.pow(2).repeat(cat.size(-1), 1).T.unsqueeze(0) + ) + window_square_sum = fold(window_square_sum)[ + :, 0, 0, self.pad_amount : -self.pad_amount + ] + inverse_transform /= window_square_sum return inverse_transform def forward(self, input_data): @@ -228,7 +151,7 @@ class STFT(torch.nn.Module): reconstruction {tensor} -- Reconstructed audio given magnitude and phase. 
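
The rewritten transform above replaces the conv1d front end with unfold + matmul against a windowed real/imag Fourier basis, which keeps the op set friendly to torch-directml. A quick way to sanity-check it against torch.stft on CPU (a sketch, assuming the repo root is on PYTHONPATH):

    import torch
    from infer.lib.rmvpe import STFT

    x = torch.randn(1, 4096)                       # (batch, samples)
    stft = STFT(filter_length=1024, hop_length=256, win_length=1024, window="hann")
    mag = stft.transform(x)                        # magnitude via unfold + matmul

    ref = torch.stft(
        x,
        n_fft=1024,
        hop_length=256,
        win_length=1024,
        window=torch.hann_window(1024),
        center=True,
        pad_mode="reflect",
        return_complex=True,
    ).abs()

    print(mag.shape, torch.allclose(mag, ref, atol=1e-3))   # should agree up to float error
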
Of shape (num_batch, num_samples) """ - self.magnitude, self.phase = self.transform(input_data) + self.magnitude, self.phase = self.transform(input_data, return_phase=True) reconstruction = self.inverse(self.magnitude, self.phase) return reconstruction @@ -276,17 +199,15 @@ class ConvBlockRes(nn.Module): nn.BatchNorm2d(out_channels, momentum=momentum), nn.ReLU(), ) + # self.shortcut:Optional[nn.Module] = None if in_channels != out_channels: self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False - def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: + def forward(self, x: torch.Tensor): + if not hasattr(self, "shortcut"): return self.conv(x) + x + else: + return self.conv(x) + self.shortcut(x) class Encoder(nn.Module): @@ -318,12 +239,12 @@ class Encoder(nn.Module): self.out_size = in_size self.out_channel = out_channels - def forward(self, x): - concat_tensors = [] + def forward(self, x: torch.Tensor): + concat_tensors: List[torch.Tensor] = [] x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) + for i, layer in enumerate(self.layers): + t, x = layer(x) + concat_tensors.append(t) return x, concat_tensors @@ -342,8 +263,8 @@ class ResEncoderBlock(nn.Module): self.pool = nn.AvgPool2d(kernel_size=kernel_size) def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) + for i, conv in enumerate(self.conv): + x = conv(x) if self.kernel_size is not None: return x, self.pool(x) else: @@ -364,8 +285,8 @@ class Intermediate(nn.Module): # ) def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) + for i, layer in enumerate(self.layers): + x = layer(x) return x @@ -395,8 +316,8 @@ class ResDecoderBlock(nn.Module): def forward(self, x, concat_tensor): x = self.conv1(x) x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) + for i, conv2 in enumerate(self.conv2): + x = conv2(x) return x @@ -412,9 +333,9 @@ class Decoder(nn.Module): ) in_channels = out_channels - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) + def forward(self, x: torch.Tensor, concat_tensors: List[torch.Tensor]): + for i, layer in enumerate(self.layers): + x = layer(x, concat_tensors[-1 - i]) return x @@ -442,7 +363,7 @@ class DeepUnet(nn.Module): self.encoder.out_channel, en_de_layers, kernel_size, n_blocks ) - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: x, concat_tensors = self.encoder(x) x = self.intermediate(x) x = self.decoder(x, concat_tensors) @@ -536,33 +457,28 @@ class MelSpectrogram(torch.nn.Module): keyshift_key = str(keyshift) + "_" + str(audio.device) if keyshift_key not in self.hann_window: self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - # "cpu"if(audio.device.type=="privateuseone") else audio.device audio.device ) - # fft = torch.stft(#doesn't support pytorch_dml - # # audio.cpu() if(audio.device.type=="privateuseone")else audio, - # audio, - # n_fft=n_fft_new, - # hop_length=hop_length_new, - # win_length=win_length_new, - # window=self.hann_window[keyshift_key], - # center=center, - # return_complex=True, - # ) - # magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - # print(1111111111) - # print(222222222222222,audio.device,self.is_half) - if hasattr(self, "stft") == False: - # print(n_fft_new,hop_length_new,win_length_new,audio.shape) - 
self.stft = STFT( - filter_length=n_fft_new, + if "privateuseone" in str(audio.device): + if not hasattr(self, "stft"): + self.stft = STFT( + filter_length=n_fft_new, + hop_length=hop_length_new, + win_length=win_length_new, + window="hann", + ).to(audio.device) + magnitude = self.stft.transform(audio) + else: + fft = torch.stft( + audio, + n_fft=n_fft_new, hop_length=hop_length_new, win_length=win_length_new, - window="hann", - ).to(audio.device) - magnitude = self.stft.transform(audio) # phase - # if (audio.device.type == "privateuseone"): - # magnitude=magnitude.to(audio.device) + window=self.hann_window[keyshift_key], + center=center, + return_complex=True, + ) + magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) if keyshift != 0: size = self.n_fft // 2 + 1 resize = magnitude.size(1) @@ -573,17 +489,16 @@ class MelSpectrogram(torch.nn.Module): if self.is_half == True: mel_output = mel_output.half() log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - # print(log_mel_spec.device.type) return log_mel_spec class RMVPE: - def __init__(self, model_path, is_half, device=None): + def __init__(self, model_path: str, is_half, device=None, use_jit=False): self.resample_kernel = {} self.resample_kernel = {} self.is_half = is_half if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" + device = "cuda:0" if torch.cuda.is_available() else "cpu" self.device = device self.mel_extractor = MelSpectrogram( is_half, 128, 16000, 1024, 160, None, 30, 8000 @@ -597,13 +512,56 @@ class RMVPE: ) self.model = ort_session else: - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model + if str(self.device) == "cuda": + self.device = torch.device("cuda:0") + + def get_jit_model(): + jit_model_path = model_path.rstrip(".pth") + jit_model_path += ".half.jit" if is_half else ".jit" + reload = False + if os.path.exists(jit_model_path): + ckpt = jit.load(jit_model_path) + model_device = ckpt["device"] + if model_device != str(self.device): + reload = True + else: + reload = True + + if reload: + ckpt = jit.rmvpe_jit_export( + model_path=model_path, + mode="script", + inputs_path=None, + save_path=jit_model_path, + device=device, + is_half=is_half, + ) + model = torch.jit.load(BytesIO(ckpt["model"]), map_location=device) + return model + + def get_default_model(): + model = E2E(4, 1, (2, 2)) + ckpt = torch.load(model_path, map_location="cpu") + model.load_state_dict(ckpt) + model.eval() + if is_half: + model = model.half() + else: + model = model.float() + return model + + if use_jit: + if is_half and "cpu" in str(self.device): + logger.warning( + "Use default rmvpe model. 
\ + Jit is not supported on the CPU for half floating point" + ) + self.model = get_default_model() + else: + self.model = get_jit_model() + else: + self.model = get_default_model() + self.model = self.model.to(device) cents_mapping = 20 * np.arange(360) + 1997.3794084376191 self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 @@ -611,9 +569,9 @@ class RMVPE: def mel2hidden(self, mel): with torch.no_grad(): n_frames = mel.shape[-1] - mel = F.pad( - mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="constant" - ) + n_pad = 32 * ((n_frames - 1) // 32 + 1) - n_frames + if n_pad > 0: + mel = F.pad(mel, (0, n_pad), mode="constant") if "privateuseone" in str(self.device): onnx_input_name = self.model.get_inputs()[0].name onnx_outputs_names = self.model.get_outputs()[0].name @@ -622,6 +580,7 @@ class RMVPE: input_feed={onnx_input_name: mel.cpu().numpy()}, )[0] else: + mel = mel.half() if self.is_half else mel.float() hidden = self.model(mel) return hidden[:, :n_frames] diff --git a/infer/modules/ipex/__init__.py b/infer/modules/ipex/__init__.py index f8ad98a..cd27bc1 100644 --- a/infer/modules/ipex/__init__.py +++ b/infer/modules/ipex/__init__.py @@ -17,7 +17,6 @@ def ipex_init(): # pylint: disable=too-many-statements torch.cuda.device = torch.xpu.device torch.cuda.device_count = torch.xpu.device_count torch.cuda.device_of = torch.xpu.device_of - torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard torch.cuda.get_device_name = torch.xpu.get_device_name torch.cuda.get_device_properties = torch.xpu.get_device_properties torch.cuda.init = torch.xpu.init @@ -169,9 +168,23 @@ def ipex_init(): # pylint: disable=too-many-statements torch.cuda.get_device_properties.minor = 7 torch.cuda.ipc_collect = lambda *args, **kwargs: None torch.cuda.utilization = lambda *args, **kwargs: 0 + if hasattr(torch.xpu, "getDeviceIdListForCard"): + torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard + torch.cuda.get_device_id_list_per_card = torch.xpu.getDeviceIdListForCard + else: + torch.cuda.getDeviceIdListForCard = torch.xpu.get_device_id_list_per_card + torch.cuda.get_device_id_list_per_card = ( + torch.xpu.get_device_id_list_per_card + ) ipex_hijacks() attention_init() + try: + from .diffusers import ipex_diffusers + + ipex_diffusers() + except Exception: # pylint: disable=broad-exception-caught + pass except Exception as e: return False, e return True, None diff --git a/infer/modules/ipex/attention.py b/infer/modules/ipex/attention.py index be17f7a..0cc2803 100644 --- a/infer/modules/ipex/attention.py +++ b/infer/modules/ipex/attention.py @@ -16,17 +16,15 @@ def torch_bmm(input, mat2, *, out=None): input.shape[1], mat2.shape[2], ) - block_multiply = 2.4 if input.dtype == torch.float32 else 1.2 - block_size = ( - (batch_size_attention * input_tokens * mat2_shape) / 1024 * block_multiply - ) # MB + block_multiply = input.element_size() + slice_block_size = input_tokens * mat2_shape / 1024 / 1024 * block_multiply + block_size = batch_size_attention * slice_block_size + split_slice_size = batch_size_attention - if block_size >= 4000: + if block_size > 4: do_split = True # Find something divisible with the input_tokens - while ( - (split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply - ) > 4000: + while (split_slice_size * slice_block_size) > 4: split_slice_size = split_slice_size // 2 if split_slice_size <= 1: split_slice_size = 1 @@ -34,16 +32,12 @@ def torch_bmm(input, mat2, *, out=None): else: do_split = False - split_block_size = ( - (split_slice_size * 
input_tokens * mat2_shape) / 1024 * block_multiply - ) # MB split_2_slice_size = input_tokens - if split_block_size >= 4000: + if split_slice_size * slice_block_size > 4: + slice_block_size2 = split_slice_size * mat2_shape / 1024 / 1024 * block_multiply do_split_2 = True # Find something divisible with the input_tokens - while ( - (split_slice_size * split_2_slice_size * mat2_shape) / 1024 * block_multiply - ) > 4000: + while (split_2_slice_size * slice_block_size2) > 4: split_2_slice_size = split_2_slice_size // 2 if split_2_slice_size <= 1: split_2_slice_size = 1 @@ -91,22 +85,25 @@ def scaled_dot_product_attention( query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False ): # ARC GPUs can't allocate more than 4GB to a single block, Slice it: - shape_one, batch_size_attention, query_tokens, shape_four = query.shape - block_multiply = 2.4 if query.dtype == torch.float32 else 1.2 - block_size = ( - (shape_one * batch_size_attention * query_tokens * shape_four) - / 1024 - * block_multiply - ) # MB + if len(query.shape) == 3: + batch_size_attention, query_tokens, shape_four = query.shape + shape_one = 1 + no_shape_one = True + else: + shape_one, batch_size_attention, query_tokens, shape_four = query.shape + no_shape_one = False + + block_multiply = query.element_size() + slice_block_size = ( + shape_one * query_tokens * shape_four / 1024 / 1024 * block_multiply + ) + block_size = batch_size_attention * slice_block_size + split_slice_size = batch_size_attention - if block_size >= 4000: + if block_size > 4: do_split = True # Find something divisible with the shape_one - while ( - (shape_one * split_slice_size * query_tokens * shape_four) - / 1024 - * block_multiply - ) > 4000: + while (split_slice_size * slice_block_size) > 4: split_slice_size = split_slice_size // 2 if split_slice_size <= 1: split_slice_size = 1 @@ -114,20 +111,14 @@ def scaled_dot_product_attention( else: do_split = False - split_block_size = ( - (shape_one * split_slice_size * query_tokens * shape_four) - / 1024 - * block_multiply - ) # MB split_2_slice_size = query_tokens - if split_block_size >= 4000: + if split_slice_size * slice_block_size > 4: + slice_block_size2 = ( + shape_one * split_slice_size * shape_four / 1024 / 1024 * block_multiply + ) do_split_2 = True # Find something divisible with the batch_size_attention - while ( - (shape_one * split_slice_size * split_2_slice_size * shape_four) - / 1024 - * block_multiply - ) > 4000: + while (split_2_slice_size * slice_block_size2) > 4: split_2_slice_size = split_2_slice_size // 2 if split_2_slice_size <= 1: split_2_slice_size = 1 @@ -146,31 +137,63 @@ def scaled_dot_product_attention( ): # pylint: disable=invalid-name start_idx_2 = i2 * split_2_slice_size end_idx_2 = (i2 + 1) * split_2_slice_size + if no_shape_one: + hidden_states[ + start_idx:end_idx, start_idx_2:end_idx_2 + ] = original_scaled_dot_product_attention( + query[start_idx:end_idx, start_idx_2:end_idx_2], + key[start_idx:end_idx, start_idx_2:end_idx_2], + value[start_idx:end_idx, start_idx_2:end_idx_2], + attn_mask=attn_mask[ + start_idx:end_idx, start_idx_2:end_idx_2 + ] + if attn_mask is not None + else attn_mask, + dropout_p=dropout_p, + is_causal=is_causal, + ) + else: + hidden_states[ + :, start_idx:end_idx, start_idx_2:end_idx_2 + ] = original_scaled_dot_product_attention( + query[:, start_idx:end_idx, start_idx_2:end_idx_2], + key[:, start_idx:end_idx, start_idx_2:end_idx_2], + value[:, start_idx:end_idx, start_idx_2:end_idx_2], + attn_mask=attn_mask[ + :, start_idx:end_idx, 
start_idx_2:end_idx_2 + ] + if attn_mask is not None + else attn_mask, + dropout_p=dropout_p, + is_causal=is_causal, + ) + else: + if no_shape_one: hidden_states[ - :, start_idx:end_idx, start_idx_2:end_idx_2 + start_idx:end_idx ] = original_scaled_dot_product_attention( - query[:, start_idx:end_idx, start_idx_2:end_idx_2], - key[:, start_idx:end_idx, start_idx_2:end_idx_2], - value[:, start_idx:end_idx, start_idx_2:end_idx_2], - attn_mask=attn_mask[:, start_idx:end_idx, start_idx_2:end_idx_2] + query[start_idx:end_idx], + key[start_idx:end_idx], + value[start_idx:end_idx], + attn_mask=attn_mask[start_idx:end_idx] + if attn_mask is not None + else attn_mask, + dropout_p=dropout_p, + is_causal=is_causal, + ) + else: + hidden_states[ + :, start_idx:end_idx + ] = original_scaled_dot_product_attention( + query[:, start_idx:end_idx], + key[:, start_idx:end_idx], + value[:, start_idx:end_idx], + attn_mask=attn_mask[:, start_idx:end_idx] if attn_mask is not None else attn_mask, dropout_p=dropout_p, is_causal=is_causal, ) - else: - hidden_states[ - :, start_idx:end_idx - ] = original_scaled_dot_product_attention( - query[:, start_idx:end_idx], - key[:, start_idx:end_idx], - value[:, start_idx:end_idx], - attn_mask=attn_mask[:, start_idx:end_idx] - if attn_mask is not None - else attn_mask, - dropout_p=dropout_p, - is_causal=is_causal, - ) else: return original_scaled_dot_product_attention( query, diff --git a/infer/modules/train/train.py b/infer/modules/train/train.py index 763ad06..ad9a5b5 100644 --- a/infer/modules/train/train.py +++ b/infer/modules/train/train.py @@ -23,14 +23,16 @@ try: if torch.xpu.is_available(): from infer.modules.ipex import ipex_init - from infer.modules.ipex.gradscaler import gradscaler_init + + ipex_init() + from torch.xpu.amp import autocast + from infer.modules.ipex.gradscaler import gradscaler_init GradScaler = gradscaler_init() - ipex_init() else: from torch.cuda.amp import GradScaler, autocast -except Exception: +except Exception: # pylint: disable=broad-exception-caught from torch.cuda.amp import GradScaler, autocast torch.backends.cudnn.deterministic = False @@ -104,14 +106,11 @@ def main(): os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) children = [] + logger = utils.get_logger(hps.model_dir) for i in range(n_gpus): subproc = mp.Process( target=run, - args=( - i, - n_gpus, - hps, - ), + args=(i, n_gpus, hps, logger), ) children.append(subproc) subproc.start() @@ -120,10 +119,10 @@ def main(): children[i].join() -def run(rank, n_gpus, hps): +def run(rank, n_gpus, hps, logger: logging.Logger): global global_step if rank == 0: - logger = utils.get_logger(hps.model_dir) + # logger = utils.get_logger(hps.model_dir) logger.info(hps) # utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) diff --git a/infer/modules/uvr5/preprocess.py b/infer/modules/uvr5/preprocess.py index 19f1111..c22b291 100644 --- a/infer/modules/uvr5/preprocess.py +++ b/infer/modules/uvr5/preprocess.py @@ -16,13 +16,13 @@ from infer.lib.uvr5_pack.utils import inference class AudioPre: - def __init__(self, agg, model_path, device, is_half): + def __init__(self, agg, model_path, device, is_half, tta=False): self.model_path = model_path self.device = device self.data = { # Processing Options "postprocess": False, - "tta": False, + "tta": tta, # Constants "window_size": 512, "agg": agg, @@ -180,13 +180,13 @@ class AudioPre: class AudioPreDeEcho: - def __init__(self, agg, model_path, device, is_half): + def __init__(self, agg, 
model_path, device, is_half, tta=False): self.model_path = model_path self.device = device self.data = { # Processing Options "postprocess": False, - "tta": False, + "tta": tta, # Constants "window_size": 512, "agg": agg, diff --git a/infer/modules/vc/modules.py b/infer/modules/vc/modules.py index d90379b..3e7cdbb 100644 --- a/infer/modules/vc/modules.py +++ b/infer/modules/vc/modules.py @@ -54,16 +54,10 @@ class VC: if sid == "" or sid == []: if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 logger.info("Clean model cache") - del ( - self.net_g, - self.n_spk, - self.vc, - self.hubert_model, - self.tgt_sr, - ) # ,cpt + del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt self.hubert_model = ( self.net_g - ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None + ) = self.n_spk = self.hubert_model = self.tgt_sr = None if torch.cuda.is_available(): torch.cuda.empty_cache() ###楼下不这么折腾清理不干净 diff --git a/modules.py b/modules.py deleted file mode 100644 index d90379b..0000000 --- a/modules.py +++ /dev/null @@ -1,307 +0,0 @@ -import traceback -import logging - -logger = logging.getLogger(__name__) - -import numpy as np -import soundfile as sf -import torch -from io import BytesIO - -from infer.lib.audio import load_audio, wav2 -from infer.lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from infer.modules.vc.pipeline import Pipeline -from infer.modules.vc.utils import * - - -class VC: - def __init__(self, config): - self.n_spk = None - self.tgt_sr = None - self.net_g = None - self.pipeline = None - self.cpt = None - self.version = None - self.if_f0 = None - self.version = None - self.hubert_model = None - - self.config = config - - def get_vc(self, sid, *to_return_protect): - logger.info("Get sid: " + sid) - - to_return_protect0 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[0] - if self.if_f0 != 0 and to_return_protect - else 0.5, - "__type__": "update", - } - to_return_protect1 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[1] - if self.if_f0 != 0 and to_return_protect - else 0.33, - "__type__": "update", - } - - if sid == "" or sid == []: - if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 - logger.info("Clean model cache") - del ( - self.net_g, - self.n_spk, - self.vc, - self.hubert_model, - self.tgt_sr, - ) # ,cpt - self.hubert_model = ( - self.net_g - ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() - ###楼下不这么折腾清理不干净 - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) - del self.net_g, self.cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return ( - {"visible": False, "__type__": "update"}, - { - "visible": True, - "value": to_return_protect0, - "__type__": "update", - }, - { - "visible": True, - "value": to_return_protect1, - "__type__": "update", - }, - "", - "", - ) - person = f'{os.getenv("weight_root")}/{sid}' - 
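
The new tta flag added to AudioPre and AudioPreDeEcho above is forwarded straight into the uvr5 processing options instead of being hard-coded to False. A minimal construction sketch, assuming the corresponding uvr5 weight has already been downloaded (the path is illustrative):

    import torch
    from infer.modules.uvr5.preprocess import AudioPre

    device = "cuda" if torch.cuda.is_available() else "cpu"
    separator = AudioPre(
        agg=10,                                               # vocal aggressiveness, as in the UI
        model_path="assets/uvr5_weights/HP2_all_vocals.pth",  # any 2-stem uvr5 weight
        device=device,
        is_half=False,
        tta=True,                                             # new: enable test-time augmentation
    )
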
logger.info(f"Loading: {person}") - - self.cpt = torch.load(person, map_location="cpu") - self.tgt_sr = self.cpt["config"][-1] - self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - - synthesizer_class = { - ("v1", 1): SynthesizerTrnMs256NSFsid, - ("v1", 0): SynthesizerTrnMs256NSFsid_nono, - ("v2", 1): SynthesizerTrnMs768NSFsid, - ("v2", 0): SynthesizerTrnMs768NSFsid_nono, - } - - self.net_g = synthesizer_class.get( - (self.version, self.if_f0), SynthesizerTrnMs256NSFsid - )(*self.cpt["config"], is_half=self.config.is_half) - - del self.net_g.enc_q - - self.net_g.load_state_dict(self.cpt["weight"], strict=False) - self.net_g.eval().to(self.config.device) - if self.config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - - self.pipeline = Pipeline(self.tgt_sr, self.config) - n_spk = self.cpt["config"][-3] - index = {"value": get_index_path_from_model(sid), "__type__": "update"} - logger.info("Select index: " + index["value"]) - - return ( - ( - {"visible": True, "maximum": n_spk, "__type__": "update"}, - to_return_protect0, - to_return_protect1, - index, - index, - ) - if to_return_protect - else {"visible": True, "maximum": n_spk, "__type__": "update"} - ) - - def vc_single( - self, - sid, - input_audio_path, - f0_up_key, - f0_file, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - ): - if input_audio_path is None: - return "You need to upload an audio", None - f0_up_key = int(f0_up_key) - try: - audio = load_audio(input_audio_path, 16000) - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - - if self.hubert_model is None: - self.hubert_model = load_hubert(self.config) - - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - if file_index != "" - else file_index2 - ) # 防止小白写错,自动帮他替换掉 - - audio_opt = self.pipeline.pipeline( - self.hubert_model, - self.net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - self.if_f0, - filter_radius, - self.tgt_sr, - resample_sr, - rms_mix_rate, - self.version, - protect, - f0_file, - ) - if self.tgt_sr != resample_sr >= 16000: - tgt_sr = resample_sr - else: - tgt_sr = self.tgt_sr - index_info = ( - "Index:\n%s." % file_index - if os.path.exists(file_index) - else "Index not used." - ) - return ( - "Success.\n%s\nTime:\nnpy: %.2fs, f0: %.2fs, infer: %.2fs." 
- % (index_info, *times), - (tgt_sr, audio_opt), - ) - except: - info = traceback.format_exc() - logger.warning(info) - return info, (None, None) - - def vc_multi( - self, - sid, - dir_path, - opt_root, - paths, - f0_up_key, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - format1, - ): - try: - dir_path = ( - dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # 防止小白拷路径头尾带了空格和"和回车 - opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - os.makedirs(opt_root, exist_ok=True) - try: - if dir_path != "": - paths = [ - os.path.join(dir_path, name) for name in os.listdir(dir_path) - ] - else: - paths = [path.name for path in paths] - except: - traceback.print_exc() - paths = [path.name for path in paths] - infos = [] - for path in paths: - info, opt = self.vc_single( - sid, - path, - f0_up_key, - None, - f0_method, - file_index, - file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - ) - if "Success" in info: - try: - tgt_sr, audio_opt = opt - if format1 in ["wav", "flac"]: - sf.write( - "%s/%s.%s" - % (opt_root, os.path.basename(path), format1), - audio_opt, - tgt_sr, - ) - else: - path = "%s/%s.%s" % ( - opt_root, - os.path.basename(path), - format1, - ) - with BytesIO() as wavf: - sf.write(wavf, audio_opt, tgt_sr, format="wav") - wavf.seek(0, 0) - with open(path, "wb") as outf: - wav2(wavf, outf, format1) - except: - info += traceback.format_exc() - infos.append("%s->%s" % (os.path.basename(path), info)) - yield "\n".join(infos) - yield "\n".join(infos) - except: - yield traceback.format_exc() diff --git a/requirements-ipex.txt b/requirements-ipex.txt index 1a96cf0..610a0ce 100644 --- a/requirements-ipex.txt +++ b/requirements-ipex.txt @@ -2,7 +2,7 @@ torch==2.0.1a0 intel_extension_for_pytorch==2.0.110+xpu torchvision==0.15.2a0 https://github.com/Disty0/Retrieval-based-Voice-Conversion-WebUI/releases/download/torchaudio_wheels_for_ipex/torchaudio-2.0.2+31de77d-cp310-cp310-linux_x86_64.whl --f https://developer.intel.com/ipex-whl-stable-xpu +--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ joblib>=1.1.0 numba==0.56.4 numpy==1.23.5 diff --git a/tools/download_models.py b/tools/download_models.py new file mode 100644 index 0000000..94e0389 --- /dev/null +++ b/tools/download_models.py @@ -0,0 +1,79 @@ +import os +from pathlib import Path +import requests + +RVC_DOWNLOAD_LINK = "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/" + +BASE_DIR = Path(__file__).resolve().parent.parent + + +def dl_model(link, model_name, dir_name): + with requests.get(f"{link}{model_name}") as r: + r.raise_for_status() + os.makedirs(os.path.dirname(dir_name / model_name), exist_ok=True) + with open(dir_name / model_name, "wb") as f: + for chunk in r.iter_content(chunk_size=8192): + f.write(chunk) + + +if __name__ == "__main__": + print("Downloading hubert_base.pt...") + dl_model(RVC_DOWNLOAD_LINK, "hubert_base.pt", BASE_DIR / "assets/hubert") + print("Downloading rmvpe.pt...") + dl_model(RVC_DOWNLOAD_LINK, "rmvpe.pt", BASE_DIR / "assets/rmvpe") + print("Downloading vocals.onnx...") + dl_model( + RVC_DOWNLOAD_LINK + "uvr5_weights/onnx_dereverb_By_FoxJoy/", + "vocals.onnx", + BASE_DIR / "assets/uvr5_weights/onnx_dereverb_By_FoxJoy", + ) + + rvc_models_dir = BASE_DIR / "assets/pretrained" + + print("Downloading pretrained models:") + + model_names = [ + "D32k.pth", + "D40k.pth", + "D48k.pth", + "G32k.pth", + 
"G40k.pth", + "G48k.pth", + "f0D32k.pth", + "f0D40k.pth", + "f0D48k.pth", + "f0G32k.pth", + "f0G40k.pth", + "f0G48k.pth", + ] + for model in model_names: + print(f"Downloading {model}...") + dl_model(RVC_DOWNLOAD_LINK + "pretrained/", model, rvc_models_dir) + + rvc_models_dir = BASE_DIR / "assets/pretrained_v2" + + print("Downloading pretrained models v2:") + + for model in model_names: + print(f"Downloading {model}...") + dl_model(RVC_DOWNLOAD_LINK + "pretrained_v2/", model, rvc_models_dir) + + print("Downloading uvr5_weights:") + + rvc_models_dir = BASE_DIR / "assets/uvr5_weights" + + model_names = [ + "HP2-%E4%BA%BA%E5%A3%B0vocals%2B%E9%9D%9E%E4%BA%BA%E5%A3%B0instrumentals.pth", + "HP2_all_vocals.pth", + "HP3_all_vocals.pth", + "HP5-%E4%B8%BB%E6%97%8B%E5%BE%8B%E4%BA%BA%E5%A3%B0vocals%2B%E5%85%B6%E4%BB%96instrumentals.pth", + "HP5_only_main_vocal.pth", + "VR-DeEchoAggressive.pth", + "VR-DeEchoDeReverb.pth", + "VR-DeEchoNormal.pth", + ] + for model in model_names: + print(f"Downloading {model}...") + dl_model(RVC_DOWNLOAD_LINK + "uvr5_weights/", model, rvc_models_dir) + + print("All models downloaded!") diff --git a/tools/rvc_for_realtime.py b/tools/rvc_for_realtime.py index 094e307..378c40b 100644 --- a/tools/rvc_for_realtime.py +++ b/tools/rvc_for_realtime.py @@ -1,12 +1,11 @@ +from io import BytesIO import os +import pickle import sys import traceback -import logging - -logger = logging.getLogger(__name__) - +from infer.lib import jit +from infer.lib.jit.get_synthesizer import get_synthesizer from time import time as ttime - import fairseq import faiss import numpy as np @@ -31,17 +30,16 @@ from multiprocessing import Manager as M from configs.config import Config -config = Config() +# config = Config() mm = M() -if config.dml == True: - def forward_dml(ctx, x, scale): - ctx.scale = scale - res = x.clone().detach() - return res - fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml +def printt(strr, *args): + if len(args) == 0: + print(strr) + else: + print(strr % args) # config.device=torch.device("cpu")########强制cpu测试 @@ -56,18 +54,27 @@ class RVC: n_cpu, inp_q, opt_q, - device, + config: Config, last_rvc=None, ) -> None: """ 初始化 """ try: - global config + if config.dml == True: + + def forward_dml(ctx, x, scale): + ctx.scale = scale + res = x.clone().detach() + return res + + fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml + # global config + self.config = config self.inp_q = inp_q self.opt_q = opt_q # device="cpu"########强制cpu测试 - self.device = device + self.device = config.device self.f0_up_key = key self.time_step = 160 / 16000 * 1000 self.f0_min = 50 @@ -77,11 +84,14 @@ class RVC: self.sr = 16000 self.window = 160 self.n_cpu = n_cpu + self.use_jit = self.config.use_jit + self.is_half = config.is_half + if index_rate != 0: self.index = faiss.read_index(index_path) self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - logger.info("Index search enabled") - self.pth_path = pth_path + printt("Index search enabled") + self.pth_path: str = pth_path self.index_path = index_path self.index_rate = index_rate @@ -91,8 +101,8 @@ class RVC: suffix="", ) hubert_model = models[0] - hubert_model = hubert_model.to(device) - if config.is_half: + hubert_model = hubert_model.to(self.device) + if self.is_half: hubert_model = hubert_model.half() else: hubert_model = hubert_model.float() @@ -101,46 +111,80 @@ class RVC: else: self.model = last_rvc.model - if last_rvc is None or last_rvc.pth_path != self.pth_path: - cpt = torch.load(self.pth_path, 
map_location="cpu")
+            self.net_g: nn.Module = None
+
+            def set_default_model():
+                self.net_g, cpt = get_synthesizer(self.pth_path, self.device)
                 self.tgt_sr = cpt["config"][-1]
                 cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
                 self.if_f0 = cpt.get("f0", 1)
                 self.version = cpt.get("version", "v1")
-                if self.version == "v1":
-                    if self.if_f0 == 1:
-                        self.net_g = SynthesizerTrnMs256NSFsid(
-                            *cpt["config"], is_half=config.is_half
-                        )
-                    else:
-                        self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-                elif self.version == "v2":
-                    if self.if_f0 == 1:
-                        self.net_g = SynthesizerTrnMs768NSFsid(
-                            *cpt["config"], is_half=config.is_half
-                        )
-                    else:
-                        self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-                del self.net_g.enc_q
-                logger.debug(self.net_g.load_state_dict(cpt["weight"], strict=False))
-                self.net_g.eval().to(device)
-                # print(2333333333,device,config.device,self.device)#net_g是device,hubert是config.device
-                if config.is_half:
+                if self.is_half:
                     self.net_g = self.net_g.half()
                 else:
                     self.net_g = self.net_g.float()
-                self.is_half = config.is_half
+
+            def set_jit_model():
+                jit_pth_path = self.pth_path.rstrip(".pth")
+                jit_pth_path += ".half.jit" if self.is_half else ".jit"
+                reload = False
+                if str(self.device) == "cuda":
+                    self.device = torch.device("cuda:0")
+                if os.path.exists(jit_pth_path):
+                    cpt = jit.load(jit_pth_path)
+                    model_device = cpt["device"]
+                    if model_device != str(self.device):
+                        reload = True
+                else:
+                    reload = True
+
+                if reload:
+                    cpt = jit.synthesizer_jit_export(
+                        self.pth_path,
+                        "script",
+                        None,
+                        device=self.device,
+                        is_half=self.is_half,
+                    )
+
+                self.tgt_sr = cpt["config"][-1]
+                self.if_f0 = cpt.get("f0", 1)
+                self.version = cpt.get("version", "v1")
+                self.net_g = torch.jit.load(
+                    BytesIO(cpt["model"]), map_location=self.device
+                )
+                self.net_g.infer = self.net_g.forward
+                self.net_g.eval().to(self.device)
+
+            def set_synthesizer():
+                if self.use_jit and not config.dml:
+                    if self.is_half and "cpu" in str(self.device):
+                        printt(
+                            "Use default Synthesizer model. \
+                                Jit is not supported on the CPU for half floating point"
+                        )
+                        set_default_model()
+                    else:
+                        set_jit_model()
+                else:
+                    set_default_model()
+
+            if last_rvc is None or last_rvc.pth_path != self.pth_path:
+                set_synthesizer()
             else:
                 self.tgt_sr = last_rvc.tgt_sr
                 self.if_f0 = last_rvc.if_f0
                 self.version = last_rvc.version
-                self.net_g = last_rvc.net_g
                 self.is_half = last_rvc.is_half
+                if last_rvc.use_jit != self.use_jit:
+                    set_synthesizer()
+                else:
+                    self.net_g = last_rvc.net_g
             if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"):
                 self.model_rmvpe = last_rvc.model_rmvpe
         except:
-            logger.warning(traceback.format_exc())
+            printt(traceback.format_exc())

     def change_key(self, new_key):
         self.f0_up_key = new_key
@@ -149,7 +193,7 @@ class RVC:
         if new_index_rate != 0 and self.index_rate == 0:
             self.index = faiss.read_index(self.index_path)
             self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
-            logger.info("Index search enabled")
+            printt("Index search enabled")
         self.index_rate = new_index_rate

     def get_f0_post(self, f0):
@@ -188,7 +232,7 @@ class RVC:
         pad_size = (p_len - len(f0) + 1) // 2
         if pad_size > 0 or p_len - len(f0) - pad_size > 0:
-            # print(pad_size, p_len - len(f0) - pad_size)
+            # printt(pad_size, p_len - len(f0) - pad_size)
             f0 = np.pad(
                 f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
             )
@@ -243,7 +287,7 @@ class RVC:
         if "privateuseone" in str(self.device):  ###不支持dml,cpu又太慢用不成,拿pm顶替
             return self.get_f0(x, f0_up_key, 1, "pm")
         audio = torch.tensor(np.copy(x))[None].float()
-        # print("using crepe,device:%s"%self.device)
+        # printt("using crepe,device:%s"%self.device)
         f0, pd = torchcrepe.predict(
             audio,
             self.sr,
@@ -267,7 +311,7 @@ class RVC:
         if hasattr(self, "model_rmvpe") == False:
             from infer.lib.rmvpe import RMVPE

-            logger.info("Loading rmvpe model")
+            printt("Loading rmvpe model")
             self.model_rmvpe = RMVPE(
                 # "rmvpe.pt", is_half=self.is_half if self.device.type!="privateuseone" else False, device=self.device if self.device.type!="privateuseone"else "cpu"####dml时强制对rmvpe用cpu跑
                 # "rmvpe.pt", is_half=False, device=self.device####dml配置
@@ -275,6 +319,7 @@ class RVC:
                 "assets/rmvpe/rmvpe.pt",
                 is_half=self.is_half,
                 device=self.device,  ####正常逻辑
+                use_jit=self.config.use_jit,
             )
             # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device)
         f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
@@ -292,7 +337,7 @@ class RVC:
         f0method,
     ) -> np.ndarray:
         feats = feats.view(1, -1)
-        if config.is_half:
+        if self.config.is_half:
             feats = feats.half()
         else:
             feats = feats.float()
@@ -319,17 +364,17 @@ class RVC:
                 weight = np.square(1 / score)
                 weight /= weight.sum(axis=1, keepdims=True)
                 npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
-                if config.is_half:
+                if self.config.is_half:
                     npy = npy.astype("float16")
                 feats[0][-leng_replace_head:] = (
                     torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate
                     + (1 - self.index_rate) * feats[0][-leng_replace_head:]
                 )
             else:
-                logger.warning("Index search FAILED or disabled")
+                printt("Index search FAILED or disabled")
         except:
-            traceback.print_exc()
-            logger.warning("Index search FAILED")
+            traceback.printt_exc()
+            printt("Index search FAILED")
         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
         t3 = ttime()
         if self.if_f0 == 1:
@@ -356,16 +401,21 @@ class RVC:
             sid = torch.LongTensor([ii]).to(self.device)
         with torch.no_grad():
             if self.if_f0 == 1:
-                # print(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2)
+                # printt(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2)
                 infered_audio = self.net_g.infer(
-                    feats, p_len, cache_pitch, cache_pitchf, sid, rate
+                    feats,
+                    p_len,
+                    cache_pitch,
+                    cache_pitchf,
+                    sid,
+                    torch.FloatTensor([rate]),
                 )[0][0, 0].data.float()
             else:
-                infered_audio = self.net_g.infer(feats, p_len, sid, rate)[0][
-                    0, 0
-                ].data.float()
+                infered_audio = self.net_g.infer(
+                    feats, p_len, sid, torch.FloatTensor([rate])
+                )[0][0, 0].data.float()
         t5 = ttime()
-        logger.info(
+        printt(
             "Spent time: fea = %.2fs, index = %.2fs, f0 = %.2fs, model = %.2fs",
             t2 - t1,
             t3 - t2,
diff --git a/tools/torchgate/torchgate.py b/tools/torchgate/torchgate.py
index f95ffef..e4b80c4 100644
--- a/tools/torchgate/torchgate.py
+++ b/tools/torchgate/torchgate.py
@@ -1,4 +1,5 @@
 import torch
+from infer.lib.rmvpe import STFT
 from torch.nn.functional import conv1d, conv2d
 from typing import Union, Optional
 from .utils import linspace, temperature_sigmoid, amp_to_db
@@ -139,17 +140,26 @@ class TorchGate(torch.nn.Module):
         are set to 1, and the rest are set to 0.
         """
         if xn is not None:
-            XN = torch.stft(
-                xn,
-                n_fft=self.n_fft,
-                hop_length=self.hop_length,
-                win_length=self.win_length,
-                return_complex=True,
-                pad_mode="constant",
-                center=True,
-                window=torch.hann_window(self.win_length).to(xn.device),
-            )
-
+            if "privateuseone" in str(xn.device):
+                if not hasattr(self, "stft"):
+                    self.stft = STFT(
+                        filter_length=self.n_fft,
+                        hop_length=self.hop_length,
+                        win_length=self.win_length,
+                        window="hann",
+                    ).to(xn.device)
+                XN = self.stft.transform(xn)
+            else:
+                XN = torch.stft(
+                    xn,
+                    n_fft=self.n_fft,
+                    hop_length=self.hop_length,
+                    win_length=self.win_length,
+                    return_complex=True,
+                    pad_mode="constant",
+                    center=True,
+                    window=torch.hann_window(self.win_length).to(xn.device),
+                )
             XN_db = amp_to_db(XN).to(dtype=X_db.dtype)
         else:
             XN_db = X_db
@@ -213,16 +223,26 @@ class TorchGate(torch.nn.Module):
         """

         # Compute short-time Fourier transform (STFT)
-        X = torch.stft(
-            x,
-            n_fft=self.n_fft,
-            hop_length=self.hop_length,
-            win_length=self.win_length,
-            return_complex=True,
-            pad_mode="constant",
-            center=True,
-            window=torch.hann_window(self.win_length).to(x.device),
-        )
+        if "privateuseone" in str(x.device):
+            if not hasattr(self, "stft"):
+                self.stft = STFT(
+                    filter_length=self.n_fft,
+                    hop_length=self.hop_length,
+                    win_length=self.win_length,
+                    window="hann",
+                ).to(x.device)
+            X, phase = self.stft.transform(x, return_phase=True)
+        else:
+            X = torch.stft(
+                x,
+                n_fft=self.n_fft,
+                hop_length=self.hop_length,
+                win_length=self.win_length,
+                return_complex=True,
+                pad_mode="constant",
+                center=True,
+                window=torch.hann_window(self.win_length).to(x.device),
+            )

         # Compute signal mask based on stationary or nonstationary assumptions
         if self.nonstationary:
@@ -231,7 +251,7 @@ class TorchGate(torch.nn.Module):
             sig_mask = self._stationary_mask(amp_to_db(X), xn)

         # Propagate decrease in signal power
-        sig_mask = self.prop_decrease * (sig_mask * 1.0 - 1.0) + 1.0
+        sig_mask = self.prop_decrease * (sig_mask.float() - 1.0) + 1.0

         # Smooth signal mask with 2D convolution
         if self.smoothing_filter is not None:
@@ -245,13 +265,16 @@ class TorchGate(torch.nn.Module):
         Y = X * sig_mask.squeeze(1)

         # Inverse STFT to obtain time-domain signal
-        y = torch.istft(
-            Y,
-            n_fft=self.n_fft,
-            hop_length=self.hop_length,
-            win_length=self.win_length,
-            center=True,
-            window=torch.hann_window(self.win_length).to(Y.device),
-        )
+        if "privateuseone" in str(Y.device):
+            y = self.stft.inverse(Y, phase)
+        else:
+            y = torch.istft(
+                Y,
+                n_fft=self.n_fft,
+                hop_length=self.hop_length,
+                win_length=self.win_length,
+                center=True,
+                window=torch.hann_window(self.win_length).to(Y.device),
+            )

         return y.to(dtype=x.dtype)
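Note on the torchgate.py hunks above: torch.stft and torch.istft are not implemented for torch-directml tensors (device type "privateuseone"), so TorchGate now routes those tensors through the conv-based STFT class from infer.lib.rmvpe and keeps the phase returned by transform(..., return_phase=True) for the inverse transform. The snippet below is a hedged, self-contained sketch of the same dispatch idea built only from basic tensor ops; FramedSTFT and stft_any_device are illustrative names (they are not the project's STFT class), and the fallback is magnitude-only with no centering, so it is not numerically identical to torch.stft.

import math

import torch


class FramedSTFT(torch.nn.Module):
    # Magnitude-only STFT built from unfold + matmul so it runs on back-ends
    # where torch.stft is unavailable. Simplified illustration only: no
    # centering or padding, so frame counts differ from torch.stft(center=True).
    def __init__(self, n_fft: int, hop_length: int, win_length: int):
        super().__init__()
        window = torch.hann_window(win_length)
        freqs = torch.arange(n_fft // 2 + 1).unsqueeze(1)   # (bins, 1)
        times = torch.arange(win_length).unsqueeze(0)        # (1, win)
        angle = 2 * math.pi * freqs * times / n_fft           # (bins, win)
        self.register_buffer("cos_basis", torch.cos(angle) * window)
        self.register_buffer("sin_basis", torch.sin(angle) * window)
        self.hop_length = hop_length
        self.win_length = win_length

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, samples) -> (batch, bins, frames)
        frames = x.unfold(-1, self.win_length, self.hop_length)
        real = frames @ self.cos_basis.T
        imag = frames @ self.sin_basis.T
        return torch.sqrt(real**2 + imag**2).transpose(-1, -2)


def stft_any_device(x: torch.Tensor, n_fft: int, hop_length: int, win_length: int):
    # Dispatch mirroring the TorchGate change: tensor-op fallback on DirectML,
    # plain torch.stft everywhere else (magnitude for comparability).
    if "privateuseone" in str(x.device):
        return FramedSTFT(n_fft, hop_length, win_length).to(x.device).transform(x)
    return torch.stft(
        x,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        return_complex=True,
        center=True,
        pad_mode="constant",
        window=torch.hann_window(win_length).to(x.device),
    ).abs()


if __name__ == "__main__":
    wav = torch.randn(1, 16000)
    print(stft_any_device(wav, 2048, 441, 2048).shape)        # torch.stft path on CPU
    print(FramedSTFT(2048, 441, 2048).transform(wav).shape)   # fallback path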
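Note on the rvc_for_realtime.py hunks above: set_jit_model() caches a TorchScript export of the synthesizer next to the .pth weights (".jit" or ".half.jit") and only re-exports when the cache is missing or was produced for a different device; the cached bytes are then restored with torch.jit.load from a BytesIO buffer. (The infer() calls also start passing rate as torch.FloatTensor([rate]), presumably so the scripted graph receives a Tensor rather than a Python float.) The following is a minimal, self-contained sketch of that caching pattern using only public torch APIs; TinySynth and load_scripted are illustrative stand-ins, not the project's jit.load / jit.synthesizer_jit_export helpers.

import os
from io import BytesIO

import torch
from torch import nn


class TinySynth(nn.Module):
    """Stand-in for the real Synthesizer; only here to keep the sketch runnable."""

    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)


def load_scripted(pth_path: str, device: str, is_half: bool):
    # Cache file sits next to the weights, mirroring the ".half.jit"/".jit" naming above.
    jit_path = pth_path[: -len(".pth")] + (".half.jit" if is_half else ".jit")

    cpt = None
    if os.path.exists(jit_path):
        cpt = torch.load(jit_path)
        if cpt.get("device") != device:  # cached export targets another device: rebuild
            cpt = None

    if cpt is None:  # (re-)export the scripted model and cache it for the next start-up
        model = TinySynth().eval()
        buf = BytesIO()
        torch.jit.save(torch.jit.script(model), buf)
        cpt = {"device": device, "model": buf.getvalue()}
        torch.save(cpt, jit_path)

    net_g = torch.jit.load(BytesIO(cpt["model"]), map_location=device)
    return net_g.eval().to(device)


if __name__ == "__main__":
    net = load_scripted("toy_synth.pth", "cpu", is_half=False)
    print(net(torch.randn(1, 8)).shape)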