From 6a37c04cfb91b7691f6ad096c538a225011d9cd3 Mon Sep 17 00:00:00 2001
From: Anjok07 <68268275+Anjok07@users.noreply.github.com>
Date: Sat, 23 Jul 2022 02:59:55 -0500
Subject: [PATCH] Delete demucs directory

---
 demucs/__pycache__/apply.cpython-39.pyc      | Bin 7179 -> 0 bytes
 demucs/__pycache__/audio.cpython-39.pyc      | Bin 8551 -> 0 bytes
 demucs/__pycache__/demucs.cpython-39.pyc     | Bin 14082 -> 0 bytes
 demucs/__pycache__/hdemucs.cpython-39.pyc    | Bin 20265 -> 0 bytes
 demucs/__pycache__/pretrained.cpython-39.pyc | Bin 2385 -> 0 bytes
 demucs/__pycache__/repo.cpython-39.pyc       | Bin 6060 -> 0 bytes
 demucs/__pycache__/spec.cpython-39.pyc       | Bin 1096 -> 0 bytes
 demucs/__pycache__/states.cpython-39.pyc     | Bin 4443 -> 0 bytes
 demucs/__pycache__/utils.cpython-39.pyc      | Bin 4895 -> 0 bytes
 demucs/apply.py                              | 237 ------
 demucs/audio.py                              | 256 -------
 demucs/demucs.py                             | 459 -----------
 demucs/distrib.py                            | 100 ---
 demucs/ema.py                                |  66 --
 demucs/evaluate.py                           | 173 -----
 demucs/hdemucs.py                            | 761 -------------------
 demucs/pretrained.py                         |  66 --
 demucs/repo.py                               | 153 ----
 demucs/separate.py                           | 186 -----
 demucs/solver.py                             | 404 ----------
 demucs/spec.py                               |  41 -
 demucs/states.py                             | 148 ----
 demucs/svd.py                                |  83 --
 demucs/utils.py                              | 131 ----
 demucs/wav.py                                | 242 ------
 demucs/wdemucs.py                            |   9 -
 26 files changed, 3515 deletions(-)
 delete mode 100644 demucs/__pycache__/apply.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/audio.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/demucs.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/hdemucs.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/pretrained.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/repo.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/spec.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/states.cpython-39.pyc
 delete mode 100644 demucs/__pycache__/utils.cpython-39.pyc
 delete mode 100644 demucs/apply.py
 delete mode 100644 demucs/audio.py
 delete mode 100644 demucs/demucs.py
 delete mode 100644 demucs/distrib.py
 delete mode 100644 demucs/ema.py
 delete mode 100644 demucs/evaluate.py
 delete mode 100644 demucs/hdemucs.py
 delete mode 100644 demucs/pretrained.py
 delete mode 100644 demucs/repo.py
 delete mode 100644 demucs/separate.py
 delete mode 100644 demucs/solver.py
 delete mode 100644 demucs/spec.py
 delete mode 100644 demucs/states.py
 delete mode 100644 demucs/svd.py
 delete mode 100644 demucs/utils.py
 delete mode 100644 demucs/wav.py
 delete mode 100644 demucs/wdemucs.py

diff --git a/demucs/__pycache__/apply.cpython-39.pyc b/demucs/__pycache__/apply.cpython-39.pyc
deleted file mode 100644
index cbd1c41a0432e27bb07d26373a59b91a8bc50860..0000000000000000000000000000000000000000
Binary files a/demucs/__pycache__/apply.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/audio.cpython-39.pyc b/demucs/__pycache__/audio.cpython-39.pyc
deleted file mode 100644
index d257ebbedef3ec78e56d10f1f8a769448fe04147..0000000000000000000000000000000000000000
Binary files a/demucs/__pycache__/audio.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/demucs.cpython-39.pyc b/demucs/__pycache__/demucs.cpython-39.pyc
deleted file mode 100644
index f608b5a4d1b10c74c3e4168b4aee04c40539e587..0000000000000000000000000000000000000000
Binary files a/demucs/__pycache__/demucs.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/hdemucs.cpython-39.pyc b/demucs/__pycache__/hdemucs.cpython-39.pyc
deleted file mode 100644
Binary files a/demucs/__pycache__/hdemucs.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/pretrained.cpython-39.pyc b/demucs/__pycache__/pretrained.cpython-39.pyc
deleted file mode 100644
index d3cc165b0692953f2bd5f06617b0aa29e876e953..0000000000000000000000000000000000000000
Binary files a/demucs/__pycache__/pretrained.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/repo.cpython-39.pyc b/demucs/__pycache__/repo.cpython-39.pyc
deleted file mode 100644
Binary files a/demucs/__pycache__/repo.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/spec.cpython-39.pyc b/demucs/__pycache__/spec.cpython-39.pyc
deleted file mode 100644
index 1c7501086f0088a588173119844cb9136094aac5..0000000000000000000000000000000000000000
Binary files a/demucs/__pycache__/spec.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/states.cpython-39.pyc b/demucs/__pycache__/states.cpython-39.pyc
deleted file mode 100644
Binary files a/demucs/__pycache__/states.cpython-39.pyc and /dev/null differ
diff --git a/demucs/__pycache__/utils.cpython-39.pyc b/demucs/__pycache__/utils.cpython-39.pyc
deleted file mode 100644
index 0ad927931ffb2a12bc2415db4fe7794075032a3c..0000000000000000000000000000000000000000
Binary files a/demucs/__pycache__/utils.cpython-39.pyc and /dev/null differ
zaDOU^GVVmaz5O`2KUGCPzRO|&Dj3XPp6qoxD|gZ~zW!LYdKB70^uR-gb_GSBLbb!L zUer~AF-6w!qDTlYq>W_^Y0`khR$u#J zVTuJ)NW`!m0#37;SDQ_wg*_zj&3pjc>xFSKqCO*@pHsD`i#>((?>mq#CTZFbT%xZY z&=T6>fpa1WsnhP6=F+JYdnBSYZui(IKS~v@EP5YLI#esn3K}~*9Xq_!T~%9UU@kqx ziHd~><&y?#&e%Kx-goRS1wnV8x*-KkEVgiuT6CcKn$04sZT@y2zmtdg?aph_R(Bq1A7J@{#yzX?ydodBnq~#p%RXw^ zSeut@=iG}D0XFh6S;6l>dq)A^dlbbq{CC14p;l21tWaN2HA9sJiAC6#^x33}T*f|* zyI8Yll48)t130z5s$0R&_U}Y;rY>PdjZN21;x>>XnY8~B(Kf*6)$RY9#L=8`i4CO) z>SL;YfeIJSl`fLGoSUvXO&t|`JE-$y-FS4uW+GI zKcl9LMv;eKphe_dZQ!y_xPaFlsCM_5S1uN2c$x0!nz-Qd0^e 0 - first = models[0] - for other in models: - assert other.sources == first.sources - assert other.samplerate == first.samplerate - assert other.audio_channels == first.audio_channels - if segment is not None: - other.segment = segment - - self.audio_channels = first.audio_channels - self.samplerate = first.samplerate - self.sources = first.sources - self.models = nn.ModuleList(models) - - if weights is None: - weights = [[1. for _ in first.sources] for _ in models] - else: - assert len(weights) == len(models) - for weight in weights: - assert len(weight) == len(first.sources) - self.weights = weights - - def forward(self, x): - raise NotImplementedError("Call `apply_model` on this.") - - -class TensorChunk: - def __init__(self, tensor, offset=0, length=None): - total_length = tensor.shape[-1] - assert offset >= 0 - assert offset < total_length - - if length is None: - length = total_length - offset - else: - length = min(total_length - offset, length) - - self.tensor = tensor - self.offset = offset - self.length = length - self.device = tensor.device - - @property - def shape(self): - shape = list(self.tensor.shape) - shape[-1] = self.length - return shape - - def padded(self, target_length): - delta = target_length - self.length - total_length = self.tensor.shape[-1] - assert delta >= 0 - - start = self.offset - delta // 2 - end = start + target_length - - correct_start = max(0, start) - correct_end = min(total_length, end) - - pad_left = correct_start - start - pad_right = end - correct_end - - out = F.pad(self.tensor[..., correct_start:correct_end], (pad_left, pad_right)) - assert out.shape[-1] == target_length - return out - - -def tensor_chunk(tensor_or_chunk): - if isinstance(tensor_or_chunk, TensorChunk): - return tensor_or_chunk - else: - assert isinstance(tensor_or_chunk, th.Tensor) - return TensorChunk(tensor_or_chunk) - - -def apply_model(model, mix, shifts=1, split=True, - overlap=0.25, transition_power=1., progress=False, device=None, - num_workers=0, pool=None): - """ - Apply model to a given mixture. - - Args: - shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec - and apply the oppositve shift to the output. This is repeated `shifts` time and - all predictions are averaged. This effectively makes the model time equivariant - and improves SDR by up to 0.2 points. - split (bool): if True, the input will be broken down in 8 seconds extracts - and predictions will be performed individually on each and concatenated. - Useful for model with large memory footprint like Tasnet. - progress (bool): if True, show a progress bar (requires split=True) - device (torch.device, str, or None): if provided, device on which to - execute the computation, otherwise `mix.device` is assumed. - When `device` is different from `mix.device`, only local computations will - be on `device`, while the entire tracks will be stored on `mix.device`. 
-    """
-    if device is None:
-        device = mix.device
-    else:
-        device = th.device(device)
-    if pool is None:
-        if num_workers > 0 and device.type == 'cpu':
-            pool = ThreadPoolExecutor(num_workers)
-        else:
-            pool = DummyPoolExecutor()
-    kwargs = {
-        'shifts': shifts,
-        'split': split,
-        'overlap': overlap,
-        'transition_power': transition_power,
-        'progress': progress,
-        'device': device,
-        'pool': pool,
-    }
-    if isinstance(model, BagOfModels):
-        # Special treatment for bags of models.
-        # We explicitly apply `apply_model` multiple times so that the random shifts
-        # are different for each model.
-        estimates = 0
-        totals = [0] * len(model.sources)
-        for sub_model, weight in zip(model.models, model.weights):
-            original_model_device = next(iter(sub_model.parameters())).device
-            sub_model.to(device)
-
-            out = apply_model(sub_model, mix, **kwargs)
-            sub_model.to(original_model_device)
-            for k, inst_weight in enumerate(weight):
-                out[:, k, :, :] *= inst_weight
-                totals[k] += inst_weight
-            estimates += out
-            del out
-
-        for k in range(estimates.shape[1]):
-            estimates[:, k, :, :] /= totals[k]
-        return estimates
-
-    model.to(device)
-    assert transition_power >= 1, "transition_power < 1 leads to weird behavior."
-    batch, channels, length = mix.shape
-    if split:
-        kwargs['split'] = False
-        out = th.zeros(batch, len(model.sources), channels, length, device=mix.device)
-        sum_weight = th.zeros(length, device=mix.device)
-        segment = int(model.samplerate * model.segment)
-        stride = int((1 - overlap) * segment)
-        offsets = range(0, length, stride)
-        scale = stride / model.samplerate
-        # We start from a triangle shaped weight, with maximal weight in the middle
-        # of the segment. Then we normalize and take to the power `transition_power`.
-        # Large values of transition power will lead to sharper transitions.
-        weight = th.cat([th.arange(1, segment // 2 + 1, device=device),
-                         th.arange(segment - segment // 2, 0, -1, device=device)])
-        assert len(weight) == segment
-        # If the overlap < 50%, this will translate to a linear transition when
-        # transition_power is 1.
-        weight = (weight / weight.max())**transition_power
-        futures = []
-        for offset in offsets:
-            chunk = TensorChunk(mix, offset, segment)
-            future = pool.submit(apply_model, model, chunk, **kwargs)
-            futures.append((future, offset))
-            offset += segment
-        if progress:
-            futures = tqdm.tqdm(futures, unit_scale=scale, ncols=120, unit='seconds')
-        for future, offset in futures:
-            chunk_out = future.result()
-            chunk_length = chunk_out.shape[-1]
-            out[..., offset:offset + segment] += (weight[:chunk_length] * chunk_out).to(mix.device)
-            sum_weight[offset:offset + segment] += weight[:chunk_length].to(mix.device)
-        assert sum_weight.min() > 0
-        out /= sum_weight
-        return out
-    elif shifts:
-        kwargs['shifts'] = 0
-        max_shift = int(0.5 * model.samplerate)
-        mix = tensor_chunk(mix)
-        padded_mix = mix.padded(length + 2 * max_shift)
-        out = 0
-        for _ in range(shifts):
-            offset = random.randint(0, max_shift)
-            shifted = TensorChunk(padded_mix, offset, length + max_shift - offset)
-            shifted_out = apply_model(model, shifted, **kwargs)
-            out += shifted_out[..., max_shift - offset:]
-        out /= shifts
-        return out
-    else:
-        if hasattr(model, 'valid_length'):
-            valid_length = model.valid_length(length)
-        else:
-            valid_length = length
-        mix = tensor_chunk(mix)
-        padded_mix = mix.padded(valid_length).to(device)
-        with th.no_grad():
-            out = model(padded_mix)
-        return center_trim(out, length)
diff --git a/demucs/audio.py b/demucs/audio.py
deleted file mode 100644
index d1ba194..0000000
--- a/demucs/audio.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-import json
-import subprocess as sp
-from pathlib import Path
-
-import lameenc
-import julius
-import numpy as np
-import torch
-import torchaudio as ta
-
-from .utils import temp_filenames
-
-
-def _read_info(path):
-    stdout_data = sp.check_output([
-        'ffprobe', "-loglevel", "panic",
-        str(path), '-print_format', 'json', '-show_format', '-show_streams'
-    ])
-    return json.loads(stdout_data.decode('utf-8'))
-
-
-class AudioFile:
-    """
-    Allows reading audio from any format supported by ffmpeg, as well as resampling or
-    converting to mono on the fly. See :method:`read` for more details.
-    """
-    def __init__(self, path: Path):
-        self.path = Path(path)
-        self._info = None
-
-    def __repr__(self):
-        features = [("path", self.path)]
-        features.append(("samplerate", self.samplerate()))
-        features.append(("channels", self.channels()))
-        features.append(("streams", len(self)))
-        features_str = ", ".join(f"{name}={value}" for name, value in features)
-        return f"AudioFile({features_str})"
-
-    @property
-    def info(self):
-        if self._info is None:
-            self._info = _read_info(self.path)
-        return self._info
-
-    @property
-    def duration(self):
-        return float(self.info['format']['duration'])
-
-    @property
-    def _audio_streams(self):
-        return [
-            index for index, stream in enumerate(self.info["streams"])
-            if stream["codec_type"] == "audio"
-        ]
-
-    def __len__(self):
-        return len(self._audio_streams)
-
-    def channels(self, stream=0):
-        return int(self.info['streams'][self._audio_streams[stream]]['channels'])
-
-    def samplerate(self, stream=0):
-        return int(self.info['streams'][self._audio_streams[stream]]['sample_rate'])
-
-    def read(self,
-             seek_time=None,
-             duration=None,
-             streams=slice(None),
-             samplerate=None,
-             channels=None,
-             temp_folder=None):
-        """
-        Slightly more efficient implementation than stempeg,
-        in particular, this will extract all stems at once
-        rather than having to loop over one file multiple times
-        for each stream.
-
-        Args:
-            seek_time (float): seek time in seconds or None if no seeking is needed.
-            duration (float): duration in seconds to extract or None to extract until the end.
-            streams (slice, int or list): streams to extract, can be a single int, a list or
-                a slice. If it is a slice or list, the output will be of size [S, C, T]
-                with S the number of streams, C the number of channels and T the number of samples.
-                If it is an int, the output will be [C, T].
-            samplerate (int): if provided, will resample on the fly. If None, no resampling will
-                be done. Original sampling rate can be obtained with :method:`samplerate`.
-            channels (int): if 1, will convert to mono. We do not rely on ffmpeg for that
-                as ffmpeg automatically scales by +3dB to conserve volume when playing on speakers.
-                See https://sound.stackexchange.com/a/42710.
-                Our definition of mono is simply the average of the two channels. Any other
-                value will be ignored.
-            temp_folder (str or Path or None): temporary folder to use for decoding.
-
-
-        """
-        streams = np.array(range(len(self)))[streams]
-        single = not isinstance(streams, np.ndarray)
-        if single:
-            streams = [streams]
-
-        if duration is None:
-            target_size = None
-            query_duration = None
-        else:
-            target_size = int((samplerate or self.samplerate()) * duration)
-            query_duration = float((target_size + 1) / (samplerate or self.samplerate()))
-
-        with temp_filenames(len(streams)) as filenames:
-            command = ['ffmpeg', '-y']
-            command += ['-loglevel', 'panic']
-            if seek_time:
-                command += ['-ss', str(seek_time)]
-            command += ['-i', str(self.path)]
-            for stream, filename in zip(streams, filenames):
-                command += ['-map', f'0:{self._audio_streams[stream]}']
-                if query_duration is not None:
-                    command += ['-t', str(query_duration)]
-                command += ['-threads', '1']
-                command += ['-f', 'f32le']
-                if samplerate is not None:
-                    command += ['-ar', str(samplerate)]
-                command += [filename]
-
-            sp.run(command, check=True)
-            wavs = []
-            for filename in filenames:
-                wav = np.fromfile(filename, dtype=np.float32)
-                wav = torch.from_numpy(wav)
-                wav = wav.view(-1, self.channels()).t()
-                if channels is not None:
-                    wav = convert_audio_channels(wav, channels)
-                if target_size is not None:
-                    wav = wav[..., :target_size]
-                wavs.append(wav)
-        wav = torch.stack(wavs, dim=0)
-        if single:
-            wav = wav[0]
-        return wav
-
-
-def convert_audio_channels(wav, channels=2):
-    """Convert audio to the given number of channels."""
-    *shape, src_channels, length = wav.shape
-    if src_channels == channels:
-        pass
-    elif channels == 1:
-        # Case 1:
-        # The caller asked for 1-channel audio, but the stream has multiple
-        # channels, downmix all channels.
-        wav = wav.mean(dim=-2, keepdim=True)
-    elif src_channels == 1:
-        # Case 2:
-        # The caller asked for multiple channels, but the input file has
-        # a single channel, replicate the audio over all channels.
-        wav = wav.expand(*shape, channels, length)
-    elif src_channels >= channels:
-        # Case 3:
-        # The caller asked for multiple channels, and the input file has
-        # more channels than requested. In that case return the first channels.
-        wav = wav[..., :channels, :]
-    else:
-        # Case 4: What is a reasonable choice here?
-        raise ValueError('The audio file has less channels than requested but is not mono.')
-    return wav
-
-
-def convert_audio(wav, from_samplerate, to_samplerate, channels):
-    """Convert audio from a given samplerate to a target one and target number of channels."""
-    wav = convert_audio_channels(wav, channels)
-    return julius.resample_frac(wav, from_samplerate, to_samplerate)
-
-
-def i16_pcm(wav):
-    """Convert audio to 16 bits integer PCM format."""
-    if wav.dtype.is_floating_point:
-        return (wav.clamp_(-1, 1) * (2**15 - 1)).short()
-    else:
-        return wav
-
-
-def f32_pcm(wav):
-    """Convert audio to float 32 bits PCM format."""
-    if wav.dtype.is_floating_point:
-        return wav
-    else:
-        return wav.float() / (2**15 - 1)
-
-
-def as_dtype_pcm(wav, dtype):
-    """Convert audio to either f32 pcm or i16 pcm depending on the given dtype."""
-    if wav.dtype.is_floating_point:
-        return f32_pcm(wav)
-    else:
-        return i16_pcm(wav)
-
-
-def encode_mp3(wav, path, samplerate=44100, bitrate=320, verbose=False):
-    """Save given audio as mp3. This should work on all OSes."""
-    C, T = wav.shape
-    wav = i16_pcm(wav)
-    encoder = lameenc.Encoder()
-    encoder.set_bit_rate(bitrate)
-    encoder.set_in_sample_rate(samplerate)
-    encoder.set_channels(C)
-    encoder.set_quality(2)  # 2-highest, 7-fastest
-    if not verbose:
-        encoder.silence()
-    wav = wav.transpose(0, 1).numpy()
-    mp3_data = encoder.encode(wav.tobytes())
-    mp3_data += encoder.flush()
-    with open(path, "wb") as f:
-        f.write(mp3_data)
-
-
-def prevent_clip(wav, mode='rescale'):
-    """
-    Different strategies for avoiding raw clipping.
-    """
-    assert wav.dtype.is_floating_point, "too late for clipping"
-    if mode == 'rescale':
-        wav = wav / max(1.01 * wav.abs().max(), 1)
-    elif mode == 'clamp':
-        wav = wav.clamp(-0.99, 0.99)
-    elif mode == 'tanh':
-        wav = torch.tanh(wav)
-    else:
-        raise ValueError(f"Invalid mode {mode}")
-    return wav
-
-
-def save_audio(wav, path, samplerate, bitrate=320, clip='rescale',
-               bits_per_sample=16, as_float=False):
-    """Save audio file, automatically preventing clipping if necessary
-    based on the given `clip` strategy. If the path ends in `.mp3`, this
-    will save as mp3 with the given `bitrate`.
-    """
-    wav = prevent_clip(wav, mode=clip)
-    path = Path(path)
-    suffix = path.suffix.lower()
-    if suffix == ".mp3":
-        encode_mp3(wav, path, samplerate, bitrate)
-    elif suffix == ".wav":
-        if as_float:
-            bits_per_sample = 32
-            encoding = 'PCM_F'
-        else:
-            encoding = 'PCM_S'
-        ta.save(str(path), wav, sample_rate=samplerate,
-                encoding=encoding, bits_per_sample=bits_per_sample)
-    else:
-        raise ValueError(f"Invalid suffix for path: {suffix}")
diff --git a/demucs/demucs.py b/demucs/demucs.py
deleted file mode 100644
index d2c08e7..0000000
--- a/demucs/demucs.py
+++ /dev/null
@@ -1,459 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import typing as tp
-
-import julius
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from .states import capture_init
-from .utils import center_trim, unfold
-
-
-class BLSTM(nn.Module):
-    """
-    BiLSTM with same hidden units as input dim.
-    If `max_steps` is not None, the input will be split into overlapping
-    chunks and the LSTM applied separately on each chunk.
-    """
-    def __init__(self, dim, layers=1, max_steps=None, skip=False):
-        super().__init__()
-        assert max_steps is None or max_steps % 4 == 0
-        self.max_steps = max_steps
-        self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim)
-        self.linear = nn.Linear(2 * dim, dim)
-        self.skip = skip
-
-    def forward(self, x):
-        B, C, T = x.shape
-        y = x
-        framed = False
-        if self.max_steps is not None and T > self.max_steps:
-            width = self.max_steps
-            stride = width // 2
-            frames = unfold(x, width, stride)
-            nframes = frames.shape[2]
-            framed = True
-            x = frames.permute(0, 2, 1, 3).reshape(-1, C, width)
-
-        x = x.permute(2, 0, 1)
-
-        x = self.lstm(x)[0]
-        x = self.linear(x)
-        x = x.permute(1, 2, 0)
-        if framed:
-            out = []
-            frames = x.reshape(B, -1, C, width)
-            limit = stride // 2
-            for k in range(nframes):
-                if k == 0:
-                    out.append(frames[:, k, :, :-limit])
-                elif k == nframes - 1:
-                    out.append(frames[:, k, :, limit:])
-                else:
-                    out.append(frames[:, k, :, limit:-limit])
-            out = torch.cat(out, -1)
-            out = out[..., :T]
-            x = out
-        if self.skip:
-            x = x + y
-        return x
-
-
-def rescale_conv(conv, reference):
-    """Rescale initial weight scale. It is unclear why it helps but it certainly does.
-    """
-    std = conv.weight.std().detach()
-    scale = (std / reference)**0.5
-    conv.weight.data /= scale
-    if conv.bias is not None:
-        conv.bias.data /= scale
-
-
-def rescale_module(module, reference):
-    for sub in module.modules():
-        if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):
-            rescale_conv(sub, reference)
-
-
-class LayerScale(nn.Module):
-    """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
-    This rescales diagonally the residual outputs close to 0 initially, then learnt.
-    """
-    def __init__(self, channels: int, init: float = 0):
-        super().__init__()
-        self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True))
-        self.scale.data[:] = init
-
-    def forward(self, x):
-        return self.scale[:, None] * x
-
-
-class DConv(nn.Module):
-    """
-    New residual branches in each encoder layer.
-    This alternates dilated convolutions, potentially with LSTMs and attention.
-    Also before entering each residual branch, dimension is projected on a smaller subspace,
-    e.g. of dim `channels // compress`.
-    """
-    def __init__(self, channels: int, compress: float = 4, depth: int = 2, init: float = 1e-4,
-                 norm=True, attn=False, heads=4, ndecay=4, lstm=False, gelu=True,
-                 kernel=3, dilate=True):
-        """
-        Args:
-            channels: input/output channels for residual branch.
-            compress: amount of channel compression inside the branch.
-            depth: number of layers in the residual branch. Each layer has its own
-                projection, and potentially LSTM and attention.
-            init: initial scale for the LayerScale.
-            norm: use GroupNorm.
-            attn: use LocalAttention.
-            heads: number of heads for the LocalAttention.
-            ndecay: number of decay controls in the LocalAttention.
-            lstm: use LSTM.
-            gelu: use GELU activation.
-            kernel: kernel size for the (dilated) convolutions.
-            dilate: if true, use dilation, increasing with the depth.
-        """
-
-        super().__init__()
-        assert kernel % 2 == 1
-        self.channels = channels
-        self.compress = compress
-        self.depth = abs(depth)
-        dilate = depth > 0
-
-        norm_fn: tp.Callable[[int], nn.Module]
-        norm_fn = lambda d: nn.Identity()  # noqa
-        if norm:
-            norm_fn = lambda d: nn.GroupNorm(1, d)  # noqa
-
-        hidden = int(channels / compress)
-
-        act: tp.Type[nn.Module]
-        if gelu:
-            act = nn.GELU
-        else:
-            act = nn.ReLU
-
-        self.layers = nn.ModuleList([])
-        for d in range(self.depth):
-            dilation = 2 ** d if dilate else 1
-            padding = dilation * (kernel // 2)
-            mods = [
-                nn.Conv1d(channels, hidden, kernel, dilation=dilation, padding=padding),
-                norm_fn(hidden), act(),
-                nn.Conv1d(hidden, 2 * channels, 1),
-                norm_fn(2 * channels), nn.GLU(1),
-                LayerScale(channels, init),
-            ]
-            if attn:
-                mods.insert(3, LocalState(hidden, heads=heads, ndecay=ndecay))
-            if lstm:
-                mods.insert(3, BLSTM(hidden, layers=2, max_steps=200, skip=True))
-            layer = nn.Sequential(*mods)
-            self.layers.append(layer)
-
-    def forward(self, x):
-        for layer in self.layers:
-            x = x + layer(x)
-        return x
-
-
-class LocalState(nn.Module):
-    """Local state allows to have attention based only on data (no positional embedding),
-    but while setting a constraint on the time window (e.g. decaying penalty term).
-
-    Also a failed experiment with trying to provide some frequency based attention.
-    """
-    def __init__(self, channels: int, heads: int = 4, nfreqs: int = 0, ndecay: int = 4):
-        super().__init__()
-        assert channels % heads == 0, (channels, heads)
-        self.heads = heads
-        self.nfreqs = nfreqs
-        self.ndecay = ndecay
-        self.content = nn.Conv1d(channels, channels, 1)
-        self.query = nn.Conv1d(channels, channels, 1)
-        self.key = nn.Conv1d(channels, channels, 1)
-        if nfreqs:
-            self.query_freqs = nn.Conv1d(channels, heads * nfreqs, 1)
-        if ndecay:
-            self.query_decay = nn.Conv1d(channels, heads * ndecay, 1)
-            # Initialize decay close to zero (there is a sigmoid), for maximum initial window.
-            self.query_decay.weight.data *= 0.01
-            assert self.query_decay.bias is not None  # stupid type checker
-            self.query_decay.bias.data[:] = -2
-        self.proj = nn.Conv1d(channels + heads * nfreqs, channels, 1)
-
-    def forward(self, x):
-        B, C, T = x.shape
-        heads = self.heads
-        indexes = torch.arange(T, device=x.device, dtype=x.dtype)
-        # left index are keys, right index are queries
-        delta = indexes[:, None] - indexes[None, :]
-
-        queries = self.query(x).view(B, heads, -1, T)
-        keys = self.key(x).view(B, heads, -1, T)
-        # t are keys, s are queries
-        dots = torch.einsum("bhct,bhcs->bhts", keys, queries)
-        dots /= keys.shape[2]**0.5
-        if self.nfreqs:
-            periods = torch.arange(1, self.nfreqs + 1, device=x.device, dtype=x.dtype)
-            freq_kernel = torch.cos(2 * math.pi * delta / periods.view(-1, 1, 1))
-            freq_q = self.query_freqs(x).view(B, heads, -1, T) / self.nfreqs ** 0.5
-            dots += torch.einsum("fts,bhfs->bhts", freq_kernel, freq_q)
-        if self.ndecay:
-            decays = torch.arange(1, self.ndecay + 1, device=x.device, dtype=x.dtype)
-            decay_q = self.query_decay(x).view(B, heads, -1, T)
-            decay_q = torch.sigmoid(decay_q) / 2
-            decay_kernel = - decays.view(-1, 1, 1) * delta.abs() / self.ndecay**0.5
-            dots += torch.einsum("fts,bhfs->bhts", decay_kernel, decay_q)
-
-        # Kill self reference.
-        dots.masked_fill_(torch.eye(T, device=dots.device, dtype=torch.bool), -100)
-        weights = torch.softmax(dots, dim=2)
-
-        content = self.content(x).view(B, heads, -1, T)
-        result = torch.einsum("bhts,bhct->bhcs", weights, content)
-        if self.nfreqs:
-            time_sig = torch.einsum("bhts,fts->bhfs", weights, freq_kernel)
-            result = torch.cat([result, time_sig], 2)
-        result = result.reshape(B, -1, T)
-        return x + self.proj(result)
-
-
-class Demucs(nn.Module):
-    @capture_init
-    def __init__(self,
-                 sources,
-                 # Channels
-                 audio_channels=2,
-                 channels=64,
-                 growth=2.,
-                 # Main structure
-                 depth=6,
-                 rewrite=True,
-                 lstm_layers=0,
-                 # Convolutions
-                 kernel_size=8,
-                 stride=4,
-                 context=1,
-                 # Activations
-                 gelu=True,
-                 glu=True,
-                 # Normalization
-                 norm_starts=4,
-                 norm_groups=4,
-                 # DConv residual branch
-                 dconv_mode=1,
-                 dconv_depth=2,
-                 dconv_comp=4,
-                 dconv_attn=4,
-                 dconv_lstm=4,
-                 dconv_init=1e-4,
-                 # Pre/post processing
-                 normalize=True,
-                 resample=True,
-                 # Weight init
-                 rescale=0.1,
-                 # Metadata
-                 samplerate=44100,
-                 segment=4 * 10):
-        """
-        Args:
-            sources (list[str]): list of source names
-            audio_channels (int): stereo or mono
-            channels (int): first convolution channels
-            depth (int): number of layers in the encoder and in the decoder.
-            growth (float): multiply (resp divide) number of channels by that
-                for each layer of the encoder (resp decoder)
-            rewrite (bool): add 1x1 convolution to each layer.
-            lstm_layers (int): number of lstm layers, 0 = no lstm. Deactivated
-                by default, as this is now replaced by the smaller and faster
-                LSTMs in the DConv branches.
-            kernel_size (int): kernel size for convolutions
-            stride (int): stride for convolutions
-            context (int): kernel size of the convolution in the
-                decoder before the transposed convolution. If > 1,
-                will provide some context from neighboring time steps.
-            gelu: use GELU activation function.
-            glu (bool): use glu instead of ReLU for the 1x1 rewrite conv.
-            norm_starts: layer at which group norm starts being used.
-                decoder layers are numbered in reverse order.
-            norm_groups: number of groups for group norm.
-            dconv_mode: if 1: dconv in encoder only, 2: decoder only, 3: both.
-            dconv_depth: depth of residual DConv branch.
-            dconv_comp: compression of DConv branch.
-            dconv_attn: adds attention layers in DConv branch starting at this layer.
-            dconv_lstm: adds a LSTM layer in DConv branch starting at this layer.
-            dconv_init: initial scale for the DConv branch LayerScale.
-            normalize (bool): normalizes the input audio on the fly, and scales back
-                the output by the same amount.
-            resample (bool): upsample x2 the input and downsample /2 the output.
-            rescale (int): rescale initial weights of convolutions
-                to get their standard deviation closer to `rescale`.
-            samplerate (int): stored as meta information for easing
-                future evaluations of the model.
-            segment (float): duration of the chunks of audio to ideally evaluate the model on.
-                This is used by `demucs.apply.apply_model`.
-        """
-
-        super().__init__()
-        self.audio_channels = audio_channels
-        self.sources = sources
-        self.kernel_size = kernel_size
-        self.context = context
-        self.stride = stride
-        self.depth = depth
-        self.resample = resample
-        self.channels = channels
-        self.normalize = normalize
-        self.samplerate = samplerate
-        self.segment = segment
-        self.encoder = nn.ModuleList()
-        self.decoder = nn.ModuleList()
-        self.skip_scales = nn.ModuleList()
-
-        if glu:
-            activation = nn.GLU(dim=1)
-            ch_scale = 2
-        else:
-            activation = nn.ReLU()
-            ch_scale = 1
-        if gelu:
-            act2 = nn.GELU
-        else:
-            act2 = nn.ReLU
-
-        in_channels = audio_channels
-        padding = 0
-        for index in range(depth):
-            norm_fn = lambda d: nn.Identity()  # noqa
-            if index >= norm_starts:
-                norm_fn = lambda d: nn.GroupNorm(norm_groups, d)  # noqa
-
-            encode = []
-            encode += [
-                nn.Conv1d(in_channels, channels, kernel_size, stride),
-                norm_fn(channels),
-                act2(),
-            ]
-            attn = index >= dconv_attn
-            lstm = index >= dconv_lstm
-            if dconv_mode & 1:
-                encode += [DConv(channels, depth=dconv_depth, init=dconv_init,
-                                 compress=dconv_comp, attn=attn, lstm=lstm)]
-            if rewrite:
-                encode += [
-                    nn.Conv1d(channels, ch_scale * channels, 1),
-                    norm_fn(ch_scale * channels), activation]
-            self.encoder.append(nn.Sequential(*encode))
-
-            decode = []
-            if index > 0:
-                out_channels = in_channels
-            else:
-                out_channels = len(self.sources) * audio_channels
-            if rewrite:
-                decode += [
-                    nn.Conv1d(channels, ch_scale * channels, 2 * context + 1, padding=context),
-                    norm_fn(ch_scale * channels), activation]
-            if dconv_mode & 2:
-                decode += [DConv(channels, depth=dconv_depth, init=dconv_init,
-                                 compress=dconv_comp, attn=attn, lstm=lstm)]
-            decode += [nn.ConvTranspose1d(channels, out_channels,
-                                          kernel_size, stride, padding=padding)]
-            if index > 0:
-                decode += [norm_fn(out_channels), act2()]
-            self.decoder.insert(0, nn.Sequential(*decode))
-            in_channels = channels
-            channels = int(growth * channels)
-
-        channels = in_channels
-        if lstm_layers:
-            self.lstm = BLSTM(channels, lstm_layers)
-        else:
-            self.lstm = None
-
-        if rescale:
-            rescale_module(self, reference=rescale)
-
-    def valid_length(self, length):
-        """
-        Return the nearest valid length to use with the model so that
-        no time steps are left over in a convolution, e.g. for all
-        layers, size of the input - kernel_size % stride = 0.
-
-        Note that inputs are automatically padded if necessary to ensure that the output
-        has the same length as the input.
-        """
-        if self.resample:
-            length *= 2
-
-        for _ in range(self.depth):
-            length = math.ceil((length - self.kernel_size) / self.stride) + 1
-            length = max(1, length)
-
-        for idx in range(self.depth):
-            length = (length - 1) * self.stride + self.kernel_size
-
-        if self.resample:
-            length = math.ceil(length / 2)
-        return int(length)
-
-    def forward(self, mix):
-        x = mix
-        length = x.shape[-1]
-
-        if self.normalize:
-            mono = mix.mean(dim=1, keepdim=True)
-            mean = mono.mean(dim=-1, keepdim=True)
-            std = mono.std(dim=-1, keepdim=True)
-            x = (x - mean) / (1e-5 + std)
-        else:
-            mean = 0
-            std = 1
-
-        delta = self.valid_length(length) - length
-        x = F.pad(x, (delta // 2, delta - delta // 2))
-
-        if self.resample:
-            x = julius.resample_frac(x, 1, 2)
-
-        saved = []
-        for encode in self.encoder:
-            x = encode(x)
-            saved.append(x)
-
-        if self.lstm:
-            x = self.lstm(x)
-
-        for decode in self.decoder:
-            skip = saved.pop(-1)
-            skip = center_trim(skip, x)
-            x = decode(x + skip)
-
-        if self.resample:
-            x = julius.resample_frac(x, 2, 1)
-        x = x * std + mean
-        x = center_trim(x, length)
-        x = x.view(x.size(0), len(self.sources), self.audio_channels, x.size(-1))
-        return x
-
-    def load_state_dict(self, state, strict=True):
-        # fix a mismatch with previous generation Demucs models.
-        for idx in range(self.depth):
-            for a in ['encoder', 'decoder']:
-                for b in ['bias', 'weight']:
-                    new = f'{a}.{idx}.3.{b}'
-                    old = f'{a}.{idx}.2.{b}'
-                    if old in state and new not in state:
-                        state[new] = state.pop(old)
-        super().load_state_dict(state, strict=strict)
diff --git a/demucs/distrib.py b/demucs/distrib.py
deleted file mode 100644
index b73011a..0000000
--- a/demucs/distrib.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Distributed training utilities.
-""" -import logging -import pickle - -import numpy as np -import torch -from torch.utils.data.distributed import DistributedSampler -from torch.utils.data import DataLoader, Subset -from torch.nn.parallel.distributed import DistributedDataParallel - -from dora import distrib as dora_distrib - -logger = logging.getLogger(__name__) -rank = 0 -world_size = 1 - - -def init(): - global rank, world_size - if not torch.distributed.is_initialized(): - dora_distrib.init() - rank = dora_distrib.rank() - world_size = dora_distrib.world_size() - - -def average(metrics, count=1.): - if isinstance(metrics, dict): - keys, values = zip(*sorted(metrics.items())) - values = average(values, count) - return dict(zip(keys, values)) - if world_size == 1: - return metrics - tensor = torch.tensor(list(metrics) + [1], device='cuda', dtype=torch.float32) - tensor *= count - torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM) - return (tensor[:-1] / tensor[-1]).cpu().numpy().tolist() - - -def wrap(model): - if world_size == 1: - return model - else: - return DistributedDataParallel( - model, - # find_unused_parameters=True, - device_ids=[torch.cuda.current_device()], - output_device=torch.cuda.current_device()) - - -def barrier(): - if world_size > 1: - torch.distributed.barrier() - - -def share(obj=None, src=0): - if world_size == 1: - return obj - size = torch.empty(1, device='cuda', dtype=torch.long) - if rank == src: - dump = pickle.dumps(obj) - size[0] = len(dump) - torch.distributed.broadcast(size, src=src) - # size variable is now set to the length of pickled obj in all processes - - if rank == src: - buffer = torch.from_numpy(np.frombuffer(dump, dtype=np.uint8).copy()).cuda() - else: - buffer = torch.empty(size[0].item(), device='cuda', dtype=torch.uint8) - torch.distributed.broadcast(buffer, src=src) - # buffer variable is now set to pickled obj in all processes - - if rank != src: - obj = pickle.loads(buffer.cpu().numpy().tobytes()) - logger.debug(f"Shared object of size {len(buffer)}") - return obj - - -def loader(dataset, *args, shuffle=False, klass=DataLoader, **kwargs): - """ - Create a dataloader properly in case of distributed training. - If a gradient is going to be computed you must set `shuffle=True`. - """ - if world_size == 1: - return klass(dataset, *args, shuffle=shuffle, **kwargs) - - if shuffle: - # train means we will compute backward, we use DistributedSampler - sampler = DistributedSampler(dataset) - # We ignore shuffle, DistributedSampler already shuffles - return klass(dataset, *args, **kwargs, sampler=sampler) - else: - # We make a manual shard, as DistributedSampler otherwise replicate some examples - dataset = Subset(dataset, list(range(rank, len(dataset), world_size))) - return klass(dataset, *args, shuffle=shuffle, **kwargs) diff --git a/demucs/ema.py b/demucs/ema.py deleted file mode 100644 index 958c595..0000000 --- a/demucs/ema.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# Inspired from https://github.com/rwightman/pytorch-image-models -from contextlib import contextmanager - -import torch - -from .states import swap_state - - -class ModelEMA: - """ - Perform EMA on a model. You can switch to the EMA weights temporarily - with the `swap` method. - - ema = ModelEMA(model) - with ema.swap(): - # compute valid metrics with averaged model. 
- """ - def __init__(self, model, decay=0.9999, unbias=True, device='cpu'): - self.decay = decay - self.model = model - self.state = {} - self.count = 0 - self.device = device - self.unbias = unbias - - self._init() - - def _init(self): - for key, val in self.model.state_dict().items(): - if val.dtype != torch.float32: - continue - device = self.device or val.device - if key not in self.state: - self.state[key] = val.detach().to(device, copy=True) - - def update(self): - if self.unbias: - self.count = self.count * self.decay + 1 - w = 1 / self.count - else: - w = 1 - self.decay - for key, val in self.model.state_dict().items(): - if val.dtype != torch.float32: - continue - device = self.device or val.device - self.state[key].mul_(1 - w) - self.state[key].add_(val.detach().to(device), alpha=w) - - @contextmanager - def swap(self): - with swap_state(self.model, self.state): - yield - - def state_dict(self): - return {'state': self.state, 'count': self.count} - - def load_state_dict(self, state): - self.count = state['count'] - for k, v in state['state'].items(): - self.state[k].copy_(v) diff --git a/demucs/evaluate.py b/demucs/evaluate.py deleted file mode 100644 index badb35e..0000000 --- a/demucs/evaluate.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -"""Test time evaluation, either using the original SDR from [Vincent et al. 2006] -or the newest SDR definition from the MDX 2021 competition (this one will -be reported as `nsdr` for `new sdr`). -""" - -from concurrent import futures -import logging - -from dora.log import LogProgress -import numpy as np -import musdb -import museval -import torch as th - -from .apply import apply_model -from .audio import convert_audio, save_audio -from . import distrib -from .utils import DummyPoolExecutor - - -logger = logging.getLogger(__name__) - - -def new_sdr(references, estimates): - """ - Compute the SDR according to the MDX challenge definition. - Adapted from AIcrowd/music-demixing-challenge-starter-kit (MIT license) - """ - assert references.dim() == 4 - assert estimates.dim() == 4 - delta = 1e-7 # avoid numerical errors - num = th.sum(th.square(references), dim=(2, 3)) - den = th.sum(th.square(references - estimates), dim=(2, 3)) - num += delta - den += delta - scores = 10 * th.log10(num / den) - return scores - - -def eval_track(references, estimates, win, hop, compute_sdr=True): - references = references.transpose(1, 2).double() - estimates = estimates.transpose(1, 2).double() - - new_scores = new_sdr(references.cpu()[None], estimates.cpu()[None])[0] - - if not compute_sdr: - return None, new_scores - else: - references = references.numpy() - estimates = estimates.numpy() - scores = museval.metrics.bss_eval( - references, estimates, - compute_permutation=False, - window=win, - hop=hop, - framewise_filters=False, - bsseval_sources_version=False)[:-1] - return scores, new_scores - - -def evaluate(solver, compute_sdr=False): - """ - Evaluate model using museval. - `new_only` means using only the MDX definition of the SDR, which is much faster to evaluate. 
- """ - - args = solver.args - - output_dir = solver.folder / "results" - output_dir.mkdir(exist_ok=True, parents=True) - json_folder = solver.folder / "results/test" - json_folder.mkdir(exist_ok=True, parents=True) - - # we load tracks from the original musdb set - if args.test.nonhq is None: - test_set = musdb.DB(args.dset.musdb, subsets=["test"], is_wav=True) - else: - test_set = musdb.DB(args.test.nonhq, subsets=["test"], is_wav=False) - src_rate = args.dset.musdb_samplerate - - eval_device = 'cpu' - - model = solver.model - win = int(1. * model.samplerate) - hop = int(1. * model.samplerate) - - indexes = range(distrib.rank, len(test_set), distrib.world_size) - indexes = LogProgress(logger, indexes, updates=args.misc.num_prints, - name='Eval') - pendings = [] - - pool = futures.ProcessPoolExecutor if args.test.workers else DummyPoolExecutor - with pool(args.test.workers) as pool: - for index in indexes: - track = test_set.tracks[index] - - mix = th.from_numpy(track.audio).t().float() - if mix.dim() == 1: - mix = mix[None] - mix = mix.to(solver.device) - ref = mix.mean(dim=0) # mono mixture - mix = (mix - ref.mean()) / ref.std() - mix = convert_audio(mix, src_rate, model.samplerate, model.audio_channels) - estimates = apply_model(model, mix[None], - shifts=args.test.shifts, split=args.test.split, - overlap=args.test.overlap)[0] - estimates = estimates * ref.std() + ref.mean() - estimates = estimates.to(eval_device) - - references = th.stack( - [th.from_numpy(track.targets[name].audio).t() for name in model.sources]) - if references.dim() == 2: - references = references[:, None] - references = references.to(eval_device) - references = convert_audio(references, src_rate, - model.samplerate, model.audio_channels) - if args.test.save: - folder = solver.folder / "wav" / track.name - folder.mkdir(exist_ok=True, parents=True) - for name, estimate in zip(model.sources, estimates): - save_audio(estimate.cpu(), folder / (name + ".mp3"), model.samplerate) - - pendings.append((track.name, pool.submit( - eval_track, references, estimates, win=win, hop=hop, compute_sdr=compute_sdr))) - - pendings = LogProgress(logger, pendings, updates=args.misc.num_prints, - name='Eval (BSS)') - tracks = {} - for track_name, pending in pendings: - pending = pending.result() - scores, nsdrs = pending - tracks[track_name] = {} - for idx, target in enumerate(model.sources): - tracks[track_name][target] = {'nsdr': [float(nsdrs[idx])]} - if scores is not None: - (sdr, isr, sir, sar) = scores - for idx, target in enumerate(model.sources): - values = { - "SDR": sdr[idx].tolist(), - "SIR": sir[idx].tolist(), - "ISR": isr[idx].tolist(), - "SAR": sar[idx].tolist() - } - tracks[track_name][target].update(values) - - all_tracks = {} - for src in range(distrib.world_size): - all_tracks.update(distrib.share(tracks, src)) - - result = {} - metric_names = next(iter(all_tracks.values()))[model.sources[0]] - for metric_name in metric_names: - avg = 0 - avg_of_medians = 0 - for source in model.sources: - medians = [ - np.nanmedian(all_tracks[track][source][metric_name]) - for track in all_tracks.keys()] - mean = np.mean(medians) - median = np.median(medians) - result[metric_name.lower() + "_" + source] = mean - result[metric_name.lower() + "_med" + "_" + source] = median - avg += mean / len(model.sources) - avg_of_medians += median / len(model.sources) - result[metric_name.lower()] = avg - result[metric_name.lower() + "_med"] = avg_of_medians - return result diff --git a/demucs/hdemucs.py b/demucs/hdemucs.py deleted file mode 100644 
index 864fd3f..0000000
--- a/demucs/hdemucs.py
+++ /dev/null
@@ -1,761 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""
-This code contains the spectrogram and Hybrid version of Demucs.
-"""
-from copy import deepcopy
-import math
-
-from openunmix.filtering import wiener
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from .demucs import DConv, rescale_module
-from .states import capture_init
-from .spec import spectro, ispectro
-
-
-class ScaledEmbedding(nn.Module):
-    """
-    Boost learning rate for embeddings (with `scale`).
-    Also, can make embeddings continuous with `smooth`.
-    """
-    def __init__(self, num_embeddings: int, embedding_dim: int,
-                 scale: float = 10., smooth=False):
-        super().__init__()
-        self.embedding = nn.Embedding(num_embeddings, embedding_dim)
-        if smooth:
-            weight = torch.cumsum(self.embedding.weight.data, dim=0)
-            # when summing gaussians, the scale grows as sqrt(n), so we normalize by that.
-            weight = weight / torch.arange(1, num_embeddings + 1).to(weight).sqrt()[:, None]
-            self.embedding.weight.data[:] = weight
-        self.embedding.weight.data /= scale
-        self.scale = scale
-
-    @property
-    def weight(self):
-        return self.embedding.weight * self.scale
-
-    def forward(self, x):
-        out = self.embedding(x) * self.scale
-        return out
-
-
-class HEncLayer(nn.Module):
-    def __init__(self, chin, chout, kernel_size=8, stride=4, norm_groups=1, empty=False,
-                 freq=True, dconv=True, norm=True, context=0, dconv_kw={}, pad=True,
-                 rewrite=True):
-        """Encoder layer. This is used by both the time and the frequency branches.
-
-        Args:
-            chin: number of input channels.
-            chout: number of output channels.
-            norm_groups: number of groups for group norm.
-            empty: used to make a layer with just the first conv. This is used
-                before merging the time and freq. branches.
-            freq: this is acting on frequencies.
-            dconv: insert DConv residual branches.
-            norm: use GroupNorm.
-            context: context size for the 1x1 conv.
-            dconv_kw: dict of kwargs for the DConv class.
-            pad: pad the input. Padding is done so that the output size is
-                always the input size / stride.
-            rewrite: add 1x1 conv at the end of the layer.
-        """
-        super().__init__()
-        norm_fn = lambda d: nn.Identity()  # noqa
-        if norm:
-            norm_fn = lambda d: nn.GroupNorm(norm_groups, d)  # noqa
-        if pad:
-            pad = kernel_size // 4
-        else:
-            pad = 0
-        klass = nn.Conv1d
-        self.freq = freq
-        self.kernel_size = kernel_size
-        self.stride = stride
-        self.empty = empty
-        self.norm = norm
-        self.pad = pad
-        if freq:
-            kernel_size = [kernel_size, 1]
-            stride = [stride, 1]
-            pad = [pad, 0]
-            klass = nn.Conv2d
-        self.conv = klass(chin, chout, kernel_size, stride, pad)
-        if self.empty:
-            return
-        self.norm1 = norm_fn(chout)
-        self.rewrite = None
-        if rewrite:
-            self.rewrite = klass(chout, 2 * chout, 1 + 2 * context, 1, context)
-            self.norm2 = norm_fn(2 * chout)
-
-        self.dconv = None
-        if dconv:
-            self.dconv = DConv(chout, **dconv_kw)
-
-    def forward(self, x, inject=None):
-        """
-        `inject` is used to inject the result from the time branch into the frequency branch,
-        when both have the same stride.
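
A short illustration of the `scale` trick used by `ScaledEmbedding` above, transplanted onto a linear layer so it is self-contained (`ScaledLinear` is illustrative, not part of the source): the stored weight is divided by `scale` at init and the output multiplied back, so gradients on the stored weight are `scale` times larger relative to its magnitude, which with plain SGD boosts the effective step on `weight * scale` by roughly `scale**2`.

    import torch
    from torch import nn

    class ScaledLinear(nn.Module):
        # Same pattern as ScaledEmbedding: store weight / scale, re-scale at forward.
        def __init__(self, din, dout, scale=10.):
            super().__init__()
            self.inner = nn.Linear(din, dout)
            self.inner.weight.data /= scale
            self.scale = scale

        def forward(self, x):
            return self.inner(x) * self.scale

    layer = ScaledLinear(8, 8)
    out = layer(torch.randn(2, 8))  # gradients w.r.t. the stored weight are scaled up
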
- """ - if not self.freq and x.dim() == 4: - B, C, Fr, T = x.shape - x = x.view(B, -1, T) - - if not self.freq: - le = x.shape[-1] - if not le % self.stride == 0: - x = F.pad(x, (0, self.stride - (le % self.stride))) - y = self.conv(x) - if self.empty: - return y - if inject is not None: - assert inject.shape[-1] == y.shape[-1], (inject.shape, y.shape) - if inject.dim() == 3 and y.dim() == 4: - inject = inject[:, :, None] - y = y + inject - y = F.gelu(self.norm1(y)) - if self.dconv: - if self.freq: - B, C, Fr, T = y.shape - y = y.permute(0, 2, 1, 3).reshape(-1, C, T) - y = self.dconv(y) - if self.freq: - y = y.view(B, Fr, C, T).permute(0, 2, 1, 3) - if self.rewrite: - z = self.norm2(self.rewrite(y)) - z = F.glu(z, dim=1) - else: - z = y - return z - - -class MultiWrap(nn.Module): - """ - Takes one layer and replicate it N times. each replica will act - on a frequency band. All is done so that if the N replica have the same weights, - then this is exactly equivalent to applying the original module on all frequencies. - - This is a bit over-engineered to avoid edge artifacts when splitting - the frequency bands, but it is possible the naive implementation would work as well... - """ - def __init__(self, layer, split_ratios): - """ - Args: - layer: module to clone, must be either HEncLayer or HDecLayer. - split_ratios: list of float indicating which ratio to keep for each band. - """ - super().__init__() - self.split_ratios = split_ratios - self.layers = nn.ModuleList() - self.conv = isinstance(layer, HEncLayer) - assert not layer.norm - assert layer.freq - assert layer.pad - if not self.conv: - assert not layer.context_freq - for k in range(len(split_ratios) + 1): - lay = deepcopy(layer) - if self.conv: - lay.conv.padding = (0, 0) - else: - lay.pad = False - for m in lay.modules(): - if hasattr(m, 'reset_parameters'): - m.reset_parameters() - self.layers.append(lay) - - def forward(self, x, skip=None, length=None): - B, C, Fr, T = x.shape - - ratios = list(self.split_ratios) + [1] - start = 0 - outs = [] - for ratio, layer in zip(ratios, self.layers): - if self.conv: - pad = layer.kernel_size // 4 - if ratio == 1: - limit = Fr - frames = -1 - else: - limit = int(round(Fr * ratio)) - le = limit - start - if start == 0: - le += pad - frames = round((le - layer.kernel_size) / layer.stride + 1) - limit = start + (frames - 1) * layer.stride + layer.kernel_size - if start == 0: - limit -= pad - assert limit - start > 0, (limit, start) - assert limit <= Fr, (limit, Fr) - y = x[:, :, start:limit, :] - if start == 0: - y = F.pad(y, (0, 0, pad, 0)) - if ratio == 1: - y = F.pad(y, (0, 0, 0, pad)) - outs.append(layer(y)) - start = limit - layer.kernel_size + layer.stride - else: - if ratio == 1: - limit = Fr - else: - limit = int(round(Fr * ratio)) - last = layer.last - layer.last = True - - y = x[:, :, start:limit] - s = skip[:, :, start:limit] - out, _ = layer(y, s, None) - if outs: - outs[-1][:, :, -layer.stride:] += ( - out[:, :, :layer.stride] - layer.conv_tr.bias.view(1, -1, 1, 1)) - out = out[:, :, layer.stride:] - if ratio == 1: - out = out[:, :, :-layer.stride // 2, :] - if start == 0: - out = out[:, :, layer.stride // 2:, :] - outs.append(out) - layer.last = last - start = limit - out = torch.cat(outs, dim=2) - if not self.conv and not last: - out = F.gelu(out) - if self.conv: - return out - else: - return out, None - - -class HDecLayer(nn.Module): - def __init__(self, chin, chout, last=False, kernel_size=8, stride=4, norm_groups=1, empty=False, - freq=True, dconv=True, norm=True, context=1, 
dconv_kw={}, pad=True,
-                 context_freq=True, rewrite=True):
-        """
-        Same as HEncLayer but for the decoder. See `HEncLayer` for documentation.
-        """
-        super().__init__()
-        norm_fn = lambda d: nn.Identity()  # noqa
-        if norm:
-            norm_fn = lambda d: nn.GroupNorm(norm_groups, d)  # noqa
-        if pad:
-            pad = kernel_size // 4
-        else:
-            pad = 0
-        self.pad = pad
-        self.last = last
-        self.freq = freq
-        self.chin = chin
-        self.empty = empty
-        self.stride = stride
-        self.kernel_size = kernel_size
-        self.norm = norm
-        self.context_freq = context_freq
-        klass = nn.Conv1d
-        klass_tr = nn.ConvTranspose1d
-        if freq:
-            kernel_size = [kernel_size, 1]
-            stride = [stride, 1]
-            klass = nn.Conv2d
-            klass_tr = nn.ConvTranspose2d
-        self.conv_tr = klass_tr(chin, chout, kernel_size, stride)
-        self.norm2 = norm_fn(chout)
-        if self.empty:
-            return
-        self.rewrite = None
-        if rewrite:
-            if context_freq:
-                self.rewrite = klass(chin, 2 * chin, 1 + 2 * context, 1, context)
-            else:
-                self.rewrite = klass(chin, 2 * chin, [1, 1 + 2 * context], 1,
-                                     [0, context])
-            self.norm1 = norm_fn(2 * chin)
-
-        self.dconv = None
-        if dconv:
-            self.dconv = DConv(chin, **dconv_kw)
-
-    def forward(self, x, skip, length):
-        if self.freq and x.dim() == 3:
-            B, C, T = x.shape
-            x = x.view(B, self.chin, -1, T)
-
-        if not self.empty:
-            x = x + skip
-
-            if self.rewrite:
-                y = F.glu(self.norm1(self.rewrite(x)), dim=1)
-            else:
-                y = x
-            if self.dconv:
-                if self.freq:
-                    B, C, Fr, T = y.shape
-                    y = y.permute(0, 2, 1, 3).reshape(-1, C, T)
-                y = self.dconv(y)
-                if self.freq:
-                    y = y.view(B, Fr, C, T).permute(0, 2, 1, 3)
-        else:
-            y = x
-            assert skip is None
-        z = self.norm2(self.conv_tr(y))
-        if self.freq:
-            if self.pad:
-                z = z[..., self.pad:-self.pad, :]
-        else:
-            z = z[..., self.pad:self.pad + length]
-            assert z.shape[-1] == length, (z.shape[-1], length)
-        if not self.last:
-            z = F.gelu(z)
-        return z, y
-
-
-class HDemucs(nn.Module):
-    """
-    Spectrogram and hybrid Demucs model.
-    The spectrogram model has the same structure as Demucs, except the first few layers are over the
-    frequency axis, until there is only 1 frequency, and then it moves to time convolutions.
-    Frequency layers can still access information across time steps thanks to the DConv residual.
-
-    Hybrid models have a parallel time branch. At some layer, the time branch has the same stride
-    as the frequency branch and then the two are combined. The opposite happens in the decoder.
-
-    Models can either use naive iSTFT from masking, Wiener filtering ([Uhlich et al. 2017]),
-    or complex as channels (CaC) [Choi et al. 2020]. Wiener filtering is based on the
-    Open Unmix implementation [Stoter et al. 2019].
-
-    The loss is always in the temporal domain, by backpropagating through the above
-    output methods and the iSTFT. This makes it possible to define hybrid models nicely.
-    However, this somewhat breaks Wiener filtering, as doing more iterations at test time will
-    change the spectrogram contribution without changing the one from the waveform, which
-    will lead to worse performance. I tried using the residual option in the Open Unmix Wiener
-    implementation, but it didn't improve results. CaC, on the other hand, provides similar
-    performance and works naturally with hybrid models.
-
-    This model also uses frequency embeddings to improve the efficiency of convolutions
-    over the freq. axis, following [Isik et al. 2020] (https://arxiv.org/pdf/2008.04470.pdf).
-
-    Unlike classic Demucs, there is no resampling here, and normalization is always applied.
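
Complex-as-channels (CaC), described in the docstring above, is just a pair of reshapes between a complex spectrogram and a real tensor with twice the channels. A self-contained round trip mirroring `_magnitude` and `_mask` (defined further down in hdemucs.py), with a single source:

    import torch

    B, C, Fr, T = 1, 2, 5, 7
    z = torch.randn(B, C, Fr, T, dtype=torch.complex64)

    # complex -> channels, as in HDemucs._magnitude with cac=True
    m = torch.view_as_real(z).permute(0, 1, 4, 2, 3).reshape(B, C * 2, Fr, T)

    # channels -> complex, as in HDemucs._mask (S = 1 source here)
    S = 1
    out = m.view(B, S, -1, 2, Fr, T).permute(0, 1, 2, 4, 5, 3)
    out = torch.view_as_complex(out.contiguous())

    assert torch.equal(out[:, 0], z)  # exact round trip, no information lost
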
- """ - @capture_init - def __init__(self, - sources, - # Channels - audio_channels=2, - channels=48, - channels_time=None, - growth=2, - # STFT - nfft=4096, - wiener_iters=0, - end_iters=0, - wiener_residual=False, - cac=True, - # Main structure - depth=6, - rewrite=True, - hybrid=True, - hybrid_old=False, - # Frequency branch - multi_freqs=None, - multi_freqs_depth=2, - freq_emb=0.2, - emb_scale=10, - emb_smooth=True, - # Convolutions - kernel_size=8, - time_stride=2, - stride=4, - context=1, - context_enc=0, - # Normalization - norm_starts=4, - norm_groups=4, - # DConv residual branch - dconv_mode=1, - dconv_depth=2, - dconv_comp=4, - dconv_attn=4, - dconv_lstm=4, - dconv_init=1e-4, - # Weight init - rescale=0.1, - # Metadata - samplerate=44100, - segment=4 * 10): - """ - Args: - sources (list[str]): list of source names. - audio_channels (int): input/output audio channels. - channels (int): initial number of hidden channels. - channels_time: if not None, use a different `channels` value for the time branch. - growth: increase the number of hidden channels by this factor at each layer. - nfft: number of fft bins. Note that changing this require careful computation of - various shape parameters and will not work out of the box for hybrid models. - wiener_iters: when using Wiener filtering, number of iterations at test time. - end_iters: same but at train time. For a hybrid model, must be equal to `wiener_iters`. - wiener_residual: add residual source before wiener filtering. - cac: uses complex as channels, i.e. complex numbers are 2 channels each - in input and output. no further processing is done before ISTFT. - depth (int): number of layers in the encoder and in the decoder. - rewrite (bool): add 1x1 convolution to each layer. - hybrid (bool): make a hybrid time/frequency domain, otherwise frequency only. - hybrid_old: some models trained for MDX had a padding bug. This replicates - this bug to avoid retraining them. - multi_freqs: list of frequency ratios for splitting frequency bands with `MultiWrap`. - multi_freqs_depth: how many layers to wrap with `MultiWrap`. Only the outermost - layers will be wrapped. - freq_emb: add frequency embedding after the first frequency layer if > 0, - the actual value controls the weight of the embedding. - emb_scale: equivalent to scaling the embedding learning rate - emb_smooth: initialize the embedding with a smooth one (with respect to frequencies). - kernel_size: kernel_size for encoder and decoder layers. - stride: stride for encoder and decoder layers. - time_stride: stride for the final time layer, after the merge. - context: context for 1x1 conv in the decoder. - context_enc: context for 1x1 conv in the encoder. - norm_starts: layer at which group norm starts being used. - decoder layers are numbered in reverse order. - norm_groups: number of groups for group norm. - dconv_mode: if 1: dconv in encoder only, 2: decoder only, 3: both. - dconv_depth: depth of residual DConv branch. - dconv_comp: compression of DConv branch. - dconv_attn: adds attention layers in DConv branch starting at this layer. - dconv_lstm: adds a LSTM layer in DConv branch starting at this layer. - dconv_init: initial scale for the DConv branch LayerScale. 
- rescale: weight recaling trick - - """ - super().__init__() - self.cac = cac - self.wiener_residual = wiener_residual - self.audio_channels = audio_channels - self.sources = sources - self.kernel_size = kernel_size - self.context = context - self.stride = stride - self.depth = depth - self.channels = channels - self.samplerate = samplerate - self.segment = segment - - self.nfft = nfft - self.hop_length = nfft // 4 - self.wiener_iters = wiener_iters - self.end_iters = end_iters - self.freq_emb = None - self.hybrid = hybrid - self.hybrid_old = hybrid_old - if hybrid_old: - assert hybrid, "hybrid_old must come with hybrid=True" - if hybrid: - assert wiener_iters == end_iters - - self.encoder = nn.ModuleList() - self.decoder = nn.ModuleList() - - if hybrid: - self.tencoder = nn.ModuleList() - self.tdecoder = nn.ModuleList() - - chin = audio_channels - chin_z = chin # number of channels for the freq branch - if self.cac: - chin_z *= 2 - chout = channels_time or channels - chout_z = channels - freqs = nfft // 2 - - for index in range(depth): - lstm = index >= dconv_lstm - attn = index >= dconv_attn - norm = index >= norm_starts - freq = freqs > 1 - stri = stride - ker = kernel_size - if not freq: - assert freqs == 1 - ker = time_stride * 2 - stri = time_stride - - pad = True - last_freq = False - if freq and freqs <= kernel_size: - ker = freqs - pad = False - last_freq = True - - kw = { - 'kernel_size': ker, - 'stride': stri, - 'freq': freq, - 'pad': pad, - 'norm': norm, - 'rewrite': rewrite, - 'norm_groups': norm_groups, - 'dconv_kw': { - 'lstm': lstm, - 'attn': attn, - 'depth': dconv_depth, - 'compress': dconv_comp, - 'init': dconv_init, - 'gelu': True, - } - } - kwt = dict(kw) - kwt['freq'] = 0 - kwt['kernel_size'] = kernel_size - kwt['stride'] = stride - kwt['pad'] = True - kw_dec = dict(kw) - multi = False - if multi_freqs and index < multi_freqs_depth: - multi = True - kw_dec['context_freq'] = False - - if last_freq: - chout_z = max(chout, chout_z) - chout = chout_z - - enc = HEncLayer(chin_z, chout_z, - dconv=dconv_mode & 1, context=context_enc, **kw) - if hybrid and freq: - tenc = HEncLayer(chin, chout, dconv=dconv_mode & 1, context=context_enc, - empty=last_freq, **kwt) - self.tencoder.append(tenc) - - if multi: - enc = MultiWrap(enc, multi_freqs) - self.encoder.append(enc) - if index == 0: - chin = self.audio_channels * len(self.sources) - chin_z = chin - if self.cac: - chin_z *= 2 - dec = HDecLayer(chout_z, chin_z, dconv=dconv_mode & 2, - last=index == 0, context=context, **kw_dec) - if multi: - dec = MultiWrap(dec, multi_freqs) - if hybrid and freq: - tdec = HDecLayer(chout, chin, dconv=dconv_mode & 2, empty=last_freq, - last=index == 0, context=context, **kwt) - self.tdecoder.insert(0, tdec) - self.decoder.insert(0, dec) - - chin = chout - chin_z = chout_z - chout = int(growth * chout) - chout_z = int(growth * chout_z) - if freq: - if freqs <= kernel_size: - freqs = 1 - else: - freqs //= stride - if index == 0 and freq_emb: - self.freq_emb = ScaledEmbedding( - freqs, chin_z, smooth=emb_smooth, scale=emb_scale) - self.freq_emb_scale = freq_emb - - if rescale: - rescale_module(self, reference=rescale) - - def _spec(self, x): - hl = self.hop_length - nfft = self.nfft - x0 = x # noqa - - if self.hybrid: - # We re-pad the signal in order to keep the property - # that the size of the output is exactly the size of the input - # divided by the stride (here hop_length), when divisible. - # This is achieved by padding by 1/4th of the kernel size (here nfft). 
- # which is not supported by torch.stft. - # Having all convolution operations follow this convention allow to easily - # align the time and frequency branches later on. - assert hl == nfft // 4 - le = int(math.ceil(x.shape[-1] / hl)) - pad = hl // 2 * 3 - if not self.hybrid_old: - x = F.pad(x, (pad, pad + le * hl - x.shape[-1]), mode='reflect') - else: - x = F.pad(x, (pad, pad + le * hl - x.shape[-1])) - - z = spectro(x, nfft, hl)[..., :-1, :] - if self.hybrid: - assert z.shape[-1] == le + 4, (z.shape, x.shape, le) - z = z[..., 2:2+le] - return z - - def _ispec(self, z, length=None, scale=0): - hl = self.hop_length // (4 ** scale) - z = F.pad(z, (0, 0, 0, 1)) - if self.hybrid: - z = F.pad(z, (2, 2)) - pad = hl // 2 * 3 - if not self.hybrid_old: - le = hl * int(math.ceil(length / hl)) + 2 * pad - else: - le = hl * int(math.ceil(length / hl)) - x = ispectro(z, hl, length=le) - if not self.hybrid_old: - x = x[..., pad:pad + length] - else: - x = x[..., :length] - else: - x = ispectro(z, hl, length) - return x - - def _magnitude(self, z): - # return the magnitude of the spectrogram, except when cac is True, - # in which case we just move the complex dimension to the channel one. - if self.cac: - B, C, Fr, T = z.shape - m = torch.view_as_real(z).permute(0, 1, 4, 2, 3) - m = m.reshape(B, C * 2, Fr, T) - else: - m = z.abs() - return m - - def _mask(self, z, m): - # Apply masking given the mixture spectrogram `z` and the estimated mask `m`. - # If `cac` is True, `m` is actually a full spectrogram and `z` is ignored. - niters = self.wiener_iters - if self.cac: - B, S, C, Fr, T = m.shape - out = m.view(B, S, -1, 2, Fr, T).permute(0, 1, 2, 4, 5, 3) - out = torch.view_as_complex(out.contiguous()) - return out - if self.training: - niters = self.end_iters - if niters < 0: - z = z[:, None] - return z / (1e-8 + z.abs()) * m - else: - return self._wiener(m, z, niters) - - def _wiener(self, mag_out, mix_stft, niters): - # apply wiener filtering from OpenUnmix. - init = mix_stft.dtype - wiener_win_len = 300 - residual = self.wiener_residual - - B, S, C, Fq, T = mag_out.shape - mag_out = mag_out.permute(0, 4, 3, 2, 1) - mix_stft = torch.view_as_real(mix_stft.permute(0, 3, 2, 1)) - - outs = [] - for sample in range(B): - pos = 0 - out = [] - for pos in range(0, T, wiener_win_len): - frame = slice(pos, pos + wiener_win_len) - z_out = wiener( - mag_out[sample, frame], mix_stft[sample, frame], niters, - residual=residual) - out.append(z_out.transpose(-1, -2)) - outs.append(torch.cat(out, dim=0)) - out = torch.view_as_complex(torch.stack(outs, 0)) - out = out.permute(0, 4, 3, 2, 1).contiguous() - if residual: - out = out[:, :-1] - assert list(out.shape) == [B, S, C, Fq, T] - return out.to(init) - - def forward(self, mix): - x = mix - length = x.shape[-1] - - z = self._spec(mix) - mag = self._magnitude(z) - x = mag - - B, C, Fq, T = x.shape - - # unlike previous Demucs, we always normalize because it is easier. - mean = x.mean(dim=(1, 2, 3), keepdim=True) - std = x.std(dim=(1, 2, 3), keepdim=True) - x = (x - mean) / (1e-5 + std) - # x will be the freq. branch input. - - if self.hybrid: - # Prepare the time branch input. - xt = mix - meant = xt.mean(dim=(1, 2), keepdim=True) - stdt = xt.std(dim=(1, 2), keepdim=True) - xt = (xt - meant) / (1e-5 + stdt) - - # okay, this is a giant mess I know... - saved = [] # skip connections, freq. - saved_t = [] # skip connections, time. - lengths = [] # saved lengths to properly remove padding, freq branch. - lengths_t = [] # saved lengths for time branch. 
- for idx, encode in enumerate(self.encoder): - lengths.append(x.shape[-1]) - inject = None - if self.hybrid and idx < len(self.tencoder): - # we have not yet merged branches. - lengths_t.append(xt.shape[-1]) - tenc = self.tencoder[idx] - xt = tenc(xt) - if not tenc.empty: - # save for skip connection - saved_t.append(xt) - else: - # tenc contains just the first conv., so that now time and freq. - # branches have the same shape and can be merged. - inject = xt - x = encode(x, inject) - if idx == 0 and self.freq_emb is not None: - # add frequency embedding to allow for non equivariant convolutions - # over the frequency axis. - frs = torch.arange(x.shape[-2], device=x.device) - emb = self.freq_emb(frs).t()[None, :, :, None].expand_as(x) - x = x + self.freq_emb_scale * emb - - saved.append(x) - - x = torch.zeros_like(x) - if self.hybrid: - xt = torch.zeros_like(x) - # initialize everything to zero (signal will go through u-net skips). - - for idx, decode in enumerate(self.decoder): - skip = saved.pop(-1) - x, pre = decode(x, skip, lengths.pop(-1)) - # `pre` contains the output just before final transposed convolution, - # which is used when the freq. and time branch separate. - - if self.hybrid: - offset = self.depth - len(self.tdecoder) - if self.hybrid and idx >= offset: - tdec = self.tdecoder[idx - offset] - length_t = lengths_t.pop(-1) - if tdec.empty: - assert pre.shape[2] == 1, pre.shape - pre = pre[:, :, 0] - xt, _ = tdec(pre, None, length_t) - else: - skip = saved_t.pop(-1) - xt, _ = tdec(xt, skip, length_t) - - # Let's make sure we used all stored skip connections. - assert len(saved) == 0 - assert len(lengths_t) == 0 - assert len(saved_t) == 0 - - S = len(self.sources) - x = x.view(B, S, -1, Fq, T) - x = x * std[:, None] + mean[:, None] - - zout = self._mask(z, x) - x = self._ispec(zout, length) - - if self.hybrid: - xt = xt.view(B, S, -1, length) - xt = xt * stdt[:, None] + meant[:, None] - x = xt + x - return x diff --git a/demucs/pretrained.py b/demucs/pretrained.py deleted file mode 100644 index 1c976c6..0000000 --- a/demucs/pretrained.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Loading pretrained models. -""" - -import logging -from pathlib import Path -import typing as tp - -from dora.log import fatal - -from .hdemucs import HDemucs -from .repo import RemoteRepo, LocalRepo, ModelOnlyRepo, BagOnlyRepo, AnyModelRepo, ModelLoadingError # noqa - -logger = logging.getLogger(__name__) -ROOT_URL = "https://dl.fbaipublicfiles.com/demucs/mdx_final/" -REMOTE_ROOT = Path(__file__).parent / 'remote' - -SOURCES = ["drums", "bass", "other", "vocals"] - - -def demucs_unittest(): - model = HDemucs(channels=4, sources=SOURCES) - return model - - -def add_model_flags(parser): - group = parser.add_mutually_exclusive_group(required=False) - group.add_argument("-s", "--sig", help="Locally trained XP signature.") - group.add_argument("-n", "--name", default="mdx_extra_q", - help="Pretrained model name or signature. Default is mdx_extra_q.") - parser.add_argument("--repo", type=Path, - help="Folder containing all pre-trained models for use with -n.") - - -def get_model(name: str, - repo: tp.Optional[Path] = None): - """`name` must be a bag of models name or a pretrained signature - from the remote AWS model repo or the specified local repo if `repo` is not None. 
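
For context, typical use of the loading API above. `mdx_extra_q` is the documented default name from `add_model_flags`; the local-repo call uses a placeholder signature, not a real one:

    from pathlib import Path
    from demucs.pretrained import get_model

    model = get_model('mdx_extra_q')  # bag of models resolved via the remote repo
    print(model.sources)              # the four SOURCES above for the MDX models

    # A locally trained XP signature from a folder of *.th checkpoints instead
    # (placeholder signature, shown for the call shape only):
    # model = get_model('1a2b3c4d', repo=Path('release_models'))
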
- """ - if name == 'demucs_unittest': - return demucs_unittest() - model_repo: ModelOnlyRepo - if repo is None: - remote_files = [line.strip() - for line in (REMOTE_ROOT / 'files.txt').read_text().split('\n') - if line.strip()] - model_repo = RemoteRepo(ROOT_URL, remote_files) - bag_repo = BagOnlyRepo(REMOTE_ROOT, model_repo) - else: - if not repo.is_dir(): - fatal(f"{repo} must exist and be a directory.") - model_repo = LocalRepo(repo) - bag_repo = BagOnlyRepo(repo, model_repo) - any_repo = AnyModelRepo(model_repo, bag_repo) - return any_repo.get_model(name) - - -def get_model_from_args(args): - """ - Load local model package or pre-trained model. - """ - return get_model(name=args.name, repo=args.repo) diff --git a/demucs/repo.py b/demucs/repo.py deleted file mode 100644 index f79c532..0000000 --- a/demucs/repo.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Represents a model repository, including pre-trained models and bags of models. -A repo can either be the main remote repository stored in AWS, or a local repository -with your own models. -""" - -from hashlib import sha256 -from pathlib import Path -import typing as tp - -import torch -import yaml - -from .apply import BagOfModels, Model -from .states import load_model - - -AnyModel = tp.Union[Model, BagOfModels] - - -class ModelLoadingError(RuntimeError): - pass - - -def check_checksum(path: Path, checksum: str): - sha = sha256() - with open(path, 'rb') as file: - while True: - buf = file.read(2**20) - if not buf: - break - sha.update(buf) - actual_checksum = sha.hexdigest()[:len(checksum)] - if actual_checksum != checksum: - raise ModelLoadingError(f'Invalid checksum for file {path}, ' - f'expected {checksum} but got {actual_checksum}') - - -class ModelOnlyRepo: - """Base class for all model only repos. - """ - def has_model(self, sig: str) -> bool: - raise NotImplementedError() - - def get_model(self, sig: str) -> Model: - raise NotImplementedError() - - -class RemoteRepo(ModelOnlyRepo): - def __init__(self, root_url: str, remote_files: tp.List[str]): - if not root_url.endswith('/'): - root_url += '/' - self._models: tp.Dict[str, str] = {} - for file in remote_files: - sig, checksum = file.split('.')[0].split('-') - assert sig not in self._models - self._models[sig] = root_url + file - - def has_model(self, sig: str) -> bool: - return sig in self._models - - def get_model(self, sig: str) -> Model: - try: - url = self._models[sig] - except KeyError: - raise ModelLoadingError(f'Could not find a pre-trained model with signature {sig}.') - pkg = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True) - return load_model(pkg) - - -class LocalRepo(ModelOnlyRepo): - def __init__(self, root: Path): - self.root = root - self.scan() - - def scan(self): - self._models = {} - self._checksums = {} - for file in self.root.iterdir(): - if file.suffix == '.th': - if '-' in file.stem: - xp_sig, checksum = file.stem.split('-') - self._checksums[xp_sig] = checksum - else: - xp_sig = file.stem - if xp_sig in self._models: - raise ModelLoadingError( - f'Duplicate pre-trained model exist for signature {xp_sig}. 
' - 'Please delete all but one.') - self._models[xp_sig] = file - - def has_model(self, sig: str) -> bool: - return sig in self._models - - def get_model(self, sig: str) -> Model: - try: - file = self._models[sig] - except KeyError: - raise ModelLoadingError(f'Could not find pre-trained model with signature {sig}.') - if sig in self._checksums: - check_checksum(file, self._checksums[sig]) - return load_model(file) - - -class BagOnlyRepo: - """Handles only YAML files containing bag of models, leaving the actual - model loading to some Repo. - """ - def __init__(self, root: Path, model_repo: ModelOnlyRepo): - self.root = root - self.model_repo = model_repo - self.scan() - - def scan(self): - self._bags = {} - for file in self.root.iterdir(): - if file.suffix == '.yaml': - self._bags[file.stem] = file - - def has_model(self, name: str) -> bool: - return name in self._bags - - def get_model(self, name: str) -> BagOfModels: - try: - yaml_file = self._bags[name] - except KeyError: - raise ModelLoadingError(f'{name} is neither a single pre-trained model or ' - 'a bag of models.') - bag = yaml.safe_load(open(yaml_file)) - signatures = bag['models'] - models = [self.model_repo.get_model(sig) for sig in signatures] - weights = bag.get('weights') - segment = bag.get('segment') - return BagOfModels(models, weights, segment) - - -class AnyModelRepo: - def __init__(self, model_repo: ModelOnlyRepo, bag_repo: BagOnlyRepo): - self.model_repo = model_repo - self.bag_repo = bag_repo - - def has_model(self, name_or_sig: str) -> bool: - return self.model_repo.has_model(name_or_sig) or self.bag_repo.has_model(name_or_sig) - - def get_model(self, name_or_sig: str) -> AnyModel: - if self.model_repo.has_model(name_or_sig): - return self.model_repo.get_model(name_or_sig) - else: - return self.bag_repo.get_model(name_or_sig) diff --git a/demucs/separate.py b/demucs/separate.py deleted file mode 100644 index 1554ce3..0000000 --- a/demucs/separate.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import sys -from pathlib import Path -import subprocess - -from dora.log import fatal -import torch as th -import torchaudio as ta - -from .apply import apply_model, BagOfModels -from .audio import AudioFile, convert_audio, save_audio -from .pretrained import get_model_from_args, add_model_flags, ModelLoadingError - - -def load_track(track, audio_channels, samplerate): - errors = {} - wav = None - - try: - wav = AudioFile(track).read( - streams=0, - samplerate=samplerate, - channels=audio_channels) - except FileNotFoundError: - errors['ffmpeg'] = 'Ffmpeg is not installed.' - except subprocess.CalledProcessError: - errors['ffmpeg'] = 'FFmpeg could not read the file.' - - if wav is None: - try: - wav, sr = ta.load(str(track)) - except RuntimeError as err: - errors['torchaudio'] = err.args[0] - else: - wav = convert_audio(wav, sr, samplerate, audio_channels) - - if wav is None: - print(f"Could not load file {track}. " - "Maybe it is not a supported file format? 
") - for backend, error in errors.items(): - print(f"When trying to load using {backend}, got the following error: {error}") - sys.exit(1) - return wav - - -def main(): - parser = argparse.ArgumentParser("demucs.separate", - description="Separate the sources for the given tracks") - parser.add_argument("tracks", nargs='+', type=Path, default=[], help='Path to tracks') - add_model_flags(parser) - parser.add_argument("-v", "--verbose", action="store_true") - parser.add_argument("-o", - "--out", - type=Path, - default=Path("separated"), - help="Folder where to put extracted tracks. A subfolder " - "with the model name will be created.") - parser.add_argument("-d", - "--device", - default="cuda" if th.cuda.is_available() else "cpu", - help="Device to use, default is cuda if available else cpu") - parser.add_argument("--shifts", - default=1, - type=int, - help="Number of random shifts for equivariant stabilization." - "Increase separation time but improves quality for Demucs. 10 was used " - "in the original paper.") - parser.add_argument("--overlap", - default=0.25, - type=float, - help="Overlap between the splits.") - split_group = parser.add_mutually_exclusive_group() - split_group.add_argument("--no-split", - action="store_false", - dest="split", - default=True, - help="Doesn't split audio in chunks. " - "This can use large amounts of memory.") - split_group.add_argument("--segment", type=int, - help="Set split size of each chunk. " - "This can help save memory of graphic card. ") - parser.add_argument("--two-stems", - dest="stem", metavar="STEM", - help="Only separate audio into {STEM} and no_{STEM}. ") - group = parser.add_mutually_exclusive_group() - group.add_argument("--int24", action="store_true", - help="Save wav output as 24 bits wav.") - group.add_argument("--float32", action="store_true", - help="Save wav output as float32 (2x bigger).") - parser.add_argument("--clip-mode", default="rescale", choices=["rescale", "clamp"], - help="Strategy for avoiding clipping: rescaling entire signal " - "if necessary (rescale) or hard clipping (clamp).") - parser.add_argument("--mp3", action="store_true", - help="Convert the output wavs to mp3.") - parser.add_argument("--mp3-bitrate", - default=320, - type=int, - help="Bitrate of converted mp3.") - parser.add_argument("-j", "--jobs", - default=0, - type=int, - help="Number of jobs. This can increase memory usage but will " - "be much faster when multiple cores are available.") - - args = parser.parse_args() - - try: - model = get_model_from_args(args) - except ModelLoadingError as error: - fatal(error.args[0]) - - if args.segment is not None and args.segment < 8: - fatal('Segment must greater than 8. ') - - if isinstance(model, BagOfModels): - if args.segment is not None: - for sub in model.models: - sub.segment = args.segment - else: - if args.segment is not None: - sub.segment = args.segment - - model.cpu() - model.eval() - - if args.stem is not None and args.stem not in model.sources: - fatal( - 'error: stem "{stem}" is not in selected model. STEM must be one of {sources}.'.format( - stem=args.stem, sources=', '.join(model.sources))) - out = args.out / args.name - out.mkdir(parents=True, exist_ok=True) - print(f"Separated tracks will be stored in {out.resolve()}") - for track in args.tracks: - if not track.exists(): - print( - f"File {track} does not exist. 
If the path contains spaces, " - "please try again after surrounding the entire path with quotes \"\".", - file=sys.stderr) - continue - print(f"Separating track {track}") - wav = load_track(track, model.audio_channels, model.samplerate) - - ref = wav.mean(0) - wav = (wav - ref.mean()) / ref.std() - sources = apply_model(model, wav[None], device=args.device, shifts=args.shifts, - split=args.split, overlap=args.overlap, progress=True, - num_workers=args.jobs)[0] - sources = sources * ref.std() + ref.mean() - - track_folder = out / track.name.rsplit(".", 1)[0] - track_folder.mkdir(exist_ok=True) - if args.mp3: - ext = ".mp3" - else: - ext = ".wav" - kwargs = { - 'samplerate': model.samplerate, - 'bitrate': args.mp3_bitrate, - 'clip': args.clip_mode, - 'as_float': args.float32, - 'bits_per_sample': 24 if args.int24 else 16, - } - if args.stem is None: - for source, name in zip(sources, model.sources): - stem = str(track_folder / (name + ext)) - save_audio(source, stem, **kwargs) - else: - sources = list(sources) - stem = str(track_folder / (args.stem + ext)) - save_audio(sources.pop(model.sources.index(args.stem)), stem, **kwargs) - # Warning : after poping the stem, selected stem is no longer in the list 'sources' - other_stem = th.zeros_like(sources[0]) - for i in sources: - other_stem += i - stem = str(track_folder / ("no_" + args.stem + ext)) - save_audio(other_stem, stem, **kwargs) - - -if __name__ == "__main__": - main() diff --git a/demucs/solver.py b/demucs/solver.py deleted file mode 100644 index 9970615..0000000 --- a/demucs/solver.py +++ /dev/null @@ -1,404 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Main training loop.""" - -import logging - -from dora import get_xp -from dora.utils import write_and_rename -from dora.log import LogProgress, bold -import torch -import torch.nn.functional as F - -from . import augment, distrib, states, pretrained -from .apply import apply_model -from .ema import ModelEMA -from .evaluate import evaluate, new_sdr -from .svd import svd_penalty -from .utils import pull_metric, EMA - -logger = logging.getLogger(__name__) - - -def _summary(metrics): - return " | ".join(f"{key.capitalize()}={val}" for key, val in metrics.items()) - - -class Solver(object): - def __init__(self, loaders, model, optimizer, args): - self.args = args - self.loaders = loaders - - self.model = model - self.optimizer = optimizer - self.quantizer = states.get_quantizer(self.model, args.quant, self.optimizer) - self.dmodel = distrib.wrap(model) - self.device = next(iter(self.model.parameters())).device - - # Exponential moving average of the model, either updated every batch or epoch. - # The best model from all the EMAs and the original one is kept based on the valid - # loss for the final best model. 
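
The `unbias` option mentioned in the comment above (see `ModelEMA.update` earlier in this patch) is exactly the classical bias-corrected EMA: the running `count` equals `(1 - decay**t) / (1 - decay)`, so weighting each update by `1 / count` matches dividing a standard EMA by `1 - decay**t`. A scalar check of that equivalence:

    decay = 0.9
    state, count, m = 0.0, 0.0, 0.0
    for t, x in enumerate([1.0, 2.0, 3.0, 4.0], start=1):
        count = count * decay + 1          # ModelEMA.update with unbias=True
        w = 1 / count
        state = state * (1 - w) + x * w
        m = decay * m + (1 - decay) * x    # plain EMA ...
        assert abs(state - m / (1 - decay ** t)) < 1e-12  # ... plus bias correction
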
- self.emas = {'batch': [], 'epoch': []} - for kind in self.emas.keys(): - decays = getattr(args.ema, kind) - device = self.device if kind == 'batch' else 'cpu' - if decays: - for decay in decays: - self.emas[kind].append(ModelEMA(self.model, decay, device=device)) - - # data augment - augments = [augment.Shift(shift=int(args.dset.samplerate * args.dset.shift), - same=args.augment.shift_same)] - if args.augment.flip: - augments += [augment.FlipChannels(), augment.FlipSign()] - for aug in ['scale', 'remix']: - kw = getattr(args.augment, aug) - if kw.proba: - augments.append(getattr(augment, aug.capitalize())(**kw)) - self.augment = torch.nn.Sequential(*augments) - - xp = get_xp() - self.folder = xp.folder - # Checkpoints - self.checkpoint_file = xp.folder / 'checkpoint.th' - self.best_file = xp.folder / 'best.th' - logger.debug("Checkpoint will be saved to %s", self.checkpoint_file.resolve()) - self.best_state = None - self.best_changed = False - - self.link = xp.link - self.history = self.link.history - - self._reset() - - def _serialize(self, epoch): - package = {} - package['state'] = self.model.state_dict() - package['optimizer'] = self.optimizer.state_dict() - package['history'] = self.history - package['best_state'] = self.best_state - package['args'] = self.args - for kind, emas in self.emas.items(): - for k, ema in enumerate(emas): - package[f'ema_{kind}_{k}'] = ema.state_dict() - with write_and_rename(self.checkpoint_file) as tmp: - torch.save(package, tmp) - - save_every = self.args.save_every - if save_every and (epoch + 1) % save_every == 0 and epoch + 1 != self.args.epochs: - with write_and_rename(self.folder / f'checkpoint_{epoch + 1}.th') as tmp: - torch.save(package, tmp) - - if self.best_changed: - # Saving only the latest best model. 
- with write_and_rename(self.best_file) as tmp: - package = states.serialize_model(self.model, self.args) - package['state'] = self.best_state - torch.save(package, tmp) - self.best_changed = False - - def _reset(self): - """Reset state of the solver, potentially using checkpoint.""" - if self.checkpoint_file.exists(): - logger.info(f'Loading checkpoint model: {self.checkpoint_file}') - package = torch.load(self.checkpoint_file, 'cpu') - self.model.load_state_dict(package['state']) - self.optimizer.load_state_dict(package['optimizer']) - self.history[:] = package['history'] - self.best_state = package['best_state'] - for kind, emas in self.emas.items(): - for k, ema in enumerate(emas): - ema.load_state_dict(package[f'ema_{kind}_{k}']) - elif self.args.continue_pretrained: - model = pretrained.get_model( - name=self.args.continue_pretrained, - repo=self.args.pretrained_repo) - self.model.load_state_dict(model.state_dict()) - elif self.args.continue_from: - name = 'checkpoint.th' - root = self.folder.parent - cf = root / str(self.args.continue_from) / name - logger.info("Loading from %s", cf) - package = torch.load(cf, 'cpu') - self.best_state = package['best_state'] - if self.args.continue_best: - self.model.load_state_dict(package['best_state'], strict=False) - else: - self.model.load_state_dict(package['state'], strict=False) - if self.args.continue_opt: - self.optimizer.load_state_dict(package['optimizer']) - - def _format_train(self, metrics: dict) -> dict: - """Formatting for train/valid metrics.""" - losses = { - 'loss': format(metrics['loss'], ".4f"), - 'reco': format(metrics['reco'], ".4f"), - } - if 'nsdr' in metrics: - losses['nsdr'] = format(metrics['nsdr'], ".3f") - if self.quantizer is not None: - losses['ms'] = format(metrics['ms'], ".2f") - if 'grad' in metrics: - losses['grad'] = format(metrics['grad'], ".4f") - if 'best' in metrics: - losses['best'] = format(metrics['best'], '.4f') - if 'bname' in metrics: - losses['bname'] = metrics['bname'] - if 'penalty' in metrics: - losses['penalty'] = format(metrics['penalty'], ".4f") - if 'hloss' in metrics: - losses['hloss'] = format(metrics['hloss'], ".4f") - return losses - - def _format_test(self, metrics: dict) -> dict: - """Formatting for test metrics.""" - losses = {} - if 'sdr' in metrics: - losses['sdr'] = format(metrics['sdr'], '.3f') - if 'nsdr' in metrics: - losses['nsdr'] = format(metrics['nsdr'], '.3f') - for source in self.model.sources: - key = f'sdr_{source}' - if key in metrics: - losses[key] = format(metrics[key], '.3f') - key = f'nsdr_{source}' - if key in metrics: - losses[key] = format(metrics[key], '.3f') - return losses - - def train(self): - # Optimizing the model - if self.history: - logger.info("Replaying metrics from previous run") - for epoch, metrics in enumerate(self.history): - formatted = self._format_train(metrics['train']) - logger.info( - bold(f'Train Summary | Epoch {epoch + 1} | {_summary(formatted)}')) - formatted = self._format_train(metrics['valid']) - logger.info( - bold(f'Valid Summary | Epoch {epoch + 1} | {_summary(formatted)}')) - if 'test' in metrics: - formatted = self._format_test(metrics['test']) - if formatted: - logger.info(bold(f"Test Summary | Epoch {epoch + 1} | {_summary(formatted)}")) - - epoch = 0 - for epoch in range(len(self.history), self.args.epochs): - # Train one epoch - self.model.train() # Turn on BatchNorm & Dropout - metrics = {} - logger.info('-' * 70) - logger.info("Training...") - metrics['train'] = self._run_one_epoch(epoch) - formatted = 
self._format_train(metrics['train']) - logger.info( - bold(f'Train Summary | Epoch {epoch + 1} | {_summary(formatted)}')) - - # Cross validation - logger.info('-' * 70) - logger.info('Cross validation...') - self.model.eval() # Turn off Batchnorm & Dropout - with torch.no_grad(): - valid = self._run_one_epoch(epoch, train=False) - bvalid = valid - bname = 'main' - state = states.copy_state(self.model.state_dict()) - metrics['valid'] = {} - metrics['valid']['main'] = valid - key = self.args.test.metric - for kind, emas in self.emas.items(): - for k, ema in enumerate(emas): - with ema.swap(): - valid = self._run_one_epoch(epoch, train=False) - name = f'ema_{kind}_{k}' - metrics['valid'][name] = valid - a = valid[key] - b = bvalid[key] - if key.startswith('nsdr'): - a = -a - b = -b - if a < b: - bvalid = valid - state = ema.state - bname = name - metrics['valid'].update(bvalid) - metrics['valid']['bname'] = bname - - valid_loss = metrics['valid'][key] - mets = pull_metric(self.link.history, f'valid.{key}') + [valid_loss] - if key.startswith('nsdr'): - best_loss = max(mets) - else: - best_loss = min(mets) - metrics['valid']['best'] = best_loss - if self.args.svd.penalty > 0: - kw = dict(self.args.svd) - kw.pop('penalty') - with torch.no_grad(): - penalty = svd_penalty(self.model, exact=True, **kw) - metrics['valid']['penalty'] = penalty - - formatted = self._format_train(metrics['valid']) - logger.info( - bold(f'Valid Summary | Epoch {epoch + 1} | {_summary(formatted)}')) - - # Save the best model - if valid_loss == best_loss or self.args.dset.train_valid: - logger.info(bold('New best valid loss %.4f'), valid_loss) - self.best_state = states.copy_state(state) - self.best_changed = True - - # Eval model every `test.every` epoch or on last epoch - should_eval = (epoch + 1) % self.args.test.every == 0 - is_last = epoch == self.args.epochs - 1 - reco = metrics['valid']['main']['reco'] - # Tries to detect divergence in a reliable way and finish job - # not to waste compute. 
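
The divergence check implemented just below this comment is compact enough to read as a predicate; the same thresholds, isolated (the constants come from the code that follows):

    def diverged(epoch, reco, loss_type='l1'):
        # Mirrors the solver: an L1 reconstruction loss still above 0.25 after
        # 100 epochs, or above 0.18 after 180 epochs, is treated as divergence.
        div = epoch >= 180 and reco > 0.18
        div = div or (epoch >= 100 and reco > 0.25)
        return div and loss_type == 'l1'

    assert diverged(200, 0.20)
    assert not diverged(200, 0.20, loss_type='mse')  # heuristic is only calibrated for L1
    assert not diverged(50, 0.50)
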
-            div = epoch >= 180 and reco > 0.18
-            div = div or epoch >= 100 and reco > 0.25
-            div = div and self.args.optim.loss == 'l1'
-            if div:
-                logger.warning("Finishing training early because valid loss is too high.")
-                is_last = True
-            if should_eval or is_last:
-                # Evaluate on the test set
-                logger.info('-' * 70)
-                logger.info('Evaluating on the test set...')
-                # We switch to the best known model for testing
-                if self.args.test.best:
-                    state = self.best_state
-                else:
-                    state = states.copy_state(self.model.state_dict())
-                compute_sdr = self.args.test.sdr and is_last
-                with states.swap_state(self.model, state):
-                    with torch.no_grad():
-                        metrics['test'] = evaluate(self, compute_sdr=compute_sdr)
-                formatted = self._format_test(metrics['test'])
-                logger.info(bold(f"Test Summary | Epoch {epoch + 1} | {_summary(formatted)}"))
-            self.link.push_metrics(metrics)
-
-            if distrib.rank == 0:
-                # Save model each epoch
-                self._serialize(epoch)
-                logger.debug("Checkpoint saved to %s", self.checkpoint_file.resolve())
-            if is_last:
-                break
-
-    def _run_one_epoch(self, epoch, train=True):
-        args = self.args
-        data_loader = self.loaders['train'] if train else self.loaders['valid']
-        # Set the sampler epoch to get a different ordering for distributed
-        # training, otherwise it would be ignored.
-        data_loader.sampler.epoch = epoch
-
-        label = ["Valid", "Train"][train]
-        name = label + f" | Epoch {epoch + 1}"
-        total = len(data_loader)
-        if args.max_batches:
-            total = min(total, args.max_batches)
-        logprog = LogProgress(logger, data_loader, total=total,
-                              updates=self.args.misc.num_prints, name=name)
-        averager = EMA()
-
-        for idx, sources in enumerate(logprog):
-            sources = sources.to(self.device)
-            if train:
-                sources = self.augment(sources)
-                mix = sources.sum(dim=1)
-            else:
-                mix = sources[:, 0]
-                sources = sources[:, 1:]
-
-            if not train and self.args.valid_apply:
-                estimate = apply_model(self.model, mix, split=self.args.test.split, overlap=0)
-            else:
-                estimate = self.dmodel(mix)
-            if train and hasattr(self.model, 'transform_target'):
-                sources = self.model.transform_target(mix, sources)
-            assert estimate.shape == sources.shape, (estimate.shape, sources.shape)
-            dims = tuple(range(2, sources.dim()))
-
-            if args.optim.loss == 'l1':
-                loss = F.l1_loss(estimate, sources, reduction='none')
-                loss = loss.mean(dims).mean(0)
-                reco = loss
-            elif args.optim.loss == 'mse':
-                loss = F.mse_loss(estimate, sources, reduction='none')
-                loss = loss.mean(dims)
-                reco = loss**0.5
-                reco = reco.mean(0)
-            else:
-                raise ValueError(f"Invalid loss {args.optim.loss}")
-            weights = torch.tensor(args.weights).to(sources)
-            loss = (loss * weights).sum() / weights.sum()
-
-            ms = 0
-            if self.quantizer is not None:
-                ms = self.quantizer.model_size()
-            if args.quant.diffq:
-                loss += args.quant.diffq * ms
-
-            losses = {}
-            losses['reco'] = (reco * weights).sum() / weights.sum()
-            losses['ms'] = ms
-
-            if not train:
-                nsdrs = new_sdr(sources, estimate.detach()).mean(0)
-                total = 0
-                for source, nsdr, w in zip(self.model.sources, nsdrs, weights):
-                    losses[f'nsdr_{source}'] = nsdr
-                    total += w * nsdr
-                losses['nsdr'] = total / weights.sum()
-
-            if train and args.svd.penalty > 0:
-                kw = dict(args.svd)
-                kw.pop('penalty')
-                penalty = svd_penalty(self.model, **kw)
-                losses['penalty'] = penalty
-                loss += args.svd.penalty * penalty
-
-            losses['loss'] = loss
-
-            for k, source in enumerate(self.model.sources):
-                losses[f'reco_{source}'] = reco[k]
-
-            # optimize model in training mode
-            if train:
-                loss.backward()
-                grad_norm = 0
-                grads = []
-                for p in self.model.parameters():
-                    if p.grad is not None:
-                        grad_norm += p.grad.data.norm()**2
-                        grads.append(p.grad.data)
-                losses['grad'] = grad_norm ** 0.5
-                if args.optim.clip_grad:
-                    torch.nn.utils.clip_grad_norm_(
-                        self.model.parameters(),
-                        args.optim.clip_grad)
-
-                if self.args.flag == 'uns':
-                    for n, p in self.model.named_parameters():
-                        if p.grad is None:
-                            print('no grad', n)
-                self.optimizer.step()
-                self.optimizer.zero_grad()
-                for ema in self.emas['batch']:
-                    ema.update()
-            losses = averager(losses)
-            logs = self._format_train(losses)
-            logprog.update(**logs)
-            # Just in case, clear some memory
-            del loss, estimate, reco, ms
-            if args.max_batches == idx:
-                break
-            if self.args.debug and train:
-                break
-            if self.args.flag == 'debug':
-                break
-        if train:
-            for ema in self.emas['epoch']:
-                ema.update()
-        return distrib.average(losses, idx + 1)
diff --git a/demucs/spec.py b/demucs/spec.py
deleted file mode 100644
index 85e5dc9..0000000
--- a/demucs/spec.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Convenience wrapper to perform STFT and iSTFT."""
-
-import torch as th
-
-
-def spectro(x, n_fft=512, hop_length=None, pad=0):
-    *other, length = x.shape
-    x = x.reshape(-1, length)
-    z = th.stft(x,
-                n_fft * (1 + pad),
-                hop_length or n_fft // 4,
-                window=th.hann_window(n_fft).to(x),
-                win_length=n_fft,
-                normalized=True,
-                center=True,
-                return_complex=True,
-                pad_mode='reflect')
-    _, freqs, frame = z.shape
-    return z.view(*other, freqs, frame)
-
-
-def ispectro(z, hop_length=None, length=None, pad=0):
-    *other, freqs, frames = z.shape
-    n_fft = 2 * freqs - 2
-    z = z.view(-1, freqs, frames)
-    win_length = n_fft // (1 + pad)
-    x = th.istft(z,
-                 n_fft,
-                 hop_length,
-                 window=th.hann_window(win_length).to(z.real),
-                 win_length=win_length,
-                 normalized=True,
-                 length=length,
-                 center=True)
-    _, length = x.shape
-    return x.view(*other, length)
diff --git a/demucs/states.py b/demucs/states.py
deleted file mode 100644
index db17a18..0000000
--- a/demucs/states.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""
-Utilities to save and load models.
-""" -from contextlib import contextmanager - -import functools -import hashlib -import inspect -import io -from pathlib import Path -import warnings - -from omegaconf import OmegaConf -from diffq import DiffQuantizer, UniformQuantizer, restore_quantized_state -import torch - - -def get_quantizer(model, args, optimizer=None): - """Return the quantizer given the XP quantization args.""" - quantizer = None - if args.diffq: - quantizer = DiffQuantizer( - model, min_size=args.min_size, group_size=args.group_size) - if optimizer is not None: - quantizer.setup_optimizer(optimizer) - elif args.qat: - quantizer = UniformQuantizer( - model, bits=args.qat, min_size=args.min_size) - return quantizer - - -def load_model(path_or_package, strict=False): - """Load a model from the given serialized model, either given as a dict (already loaded) - or a path to a file on disk.""" - if isinstance(path_or_package, dict): - package = path_or_package - elif isinstance(path_or_package, (str, Path)): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - path = path_or_package - package = torch.load(path, 'cpu') - else: - raise ValueError(f"Invalid type for {path_or_package}.") - - klass = package["klass"] - args = package["args"] - kwargs = package["kwargs"] - - if strict: - model = klass(*args, **kwargs) - else: - sig = inspect.signature(klass) - for key in list(kwargs): - if key not in sig.parameters: - warnings.warn("Dropping inexistant parameter " + key) - del kwargs[key] - model = klass(*args, **kwargs) - - state = package["state"] - - set_state(model, state) - return model - - -def get_state(model, quantizer, half=False): - """Get the state from a model, potentially with quantization applied. - If `half` is True, model are stored as half precision, which shouldn't impact performance - but half the state size.""" - if quantizer is None: - dtype = torch.half if half else None - state = {k: p.data.to(device='cpu', dtype=dtype) for k, p in model.state_dict().items()} - else: - state = quantizer.get_quantized_state() - state['__quantized'] = True - return state - - -def set_state(model, state, quantizer=None): - """Set the state on a given model.""" - if state.get('__quantized'): - if quantizer is not None: - quantizer.restore_quantized_state(model, state['quantized']) - else: - restore_quantized_state(model, state) - else: - model.load_state_dict(state) - return state - - -def save_with_checksum(content, path): - """Save the given value on disk, along with a sha256 hash. 
-    Should be used with the output of either `serialize_model` or `get_state`."""
-    buf = io.BytesIO()
-    torch.save(content, buf)
-    sig = hashlib.sha256(buf.getvalue()).hexdigest()[:8]
-
-    path = path.parent / (path.stem + "-" + sig + path.suffix)
-    path.write_bytes(buf.getvalue())
-
-
-def serialize_model(model, training_args, quantizer=None, half=True):
-    args, kwargs = model._init_args_kwargs
-    klass = model.__class__
-
-    state = get_state(model, quantizer, half)
-    return {
-        'klass': klass,
-        'args': args,
-        'kwargs': kwargs,
-        'state': state,
-        'training_args': OmegaConf.to_container(training_args, resolve=True),
-    }
-
-
-def copy_state(state):
-    return {k: v.cpu().clone() for k, v in state.items()}
-
-
-@contextmanager
-def swap_state(model, state):
-    """
-    Context manager that swaps the state of a model, e.g.:
-
-        # model is in old state
-        with swap_state(model, new_state):
-            # model in new state
-        # model back to old state
-    """
-    old_state = copy_state(model.state_dict())
-    model.load_state_dict(state, strict=False)
-    try:
-        yield
-    finally:
-        model.load_state_dict(old_state)
-
-
-def capture_init(init):
-    @functools.wraps(init)
-    def __init__(self, *args, **kwargs):
-        self._init_args_kwargs = (args, kwargs)
-        init(self, *args, **kwargs)
-
-    return __init__
diff --git a/demucs/svd.py b/demucs/svd.py
deleted file mode 100644
index 96a74e2..0000000
--- a/demucs/svd.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Ways to make the model stronger."""
-import random
-import torch
-
-
-def power_iteration(m, niters=1, bs=1):
-    """This is the power method. The batch size is used to try multiple
-    starting points in parallel."""
-    assert m.dim() == 2
-    assert m.shape[0] == m.shape[1]
-    dim = m.shape[0]
-    b = torch.randn(dim, bs, device=m.device, dtype=m.dtype)
-
-    for _ in range(niters):
-        n = m.mm(b)
-        norm = n.norm(dim=0, keepdim=True)
-        b = n / (1e-10 + norm)
-
-    return norm.mean()
-
-
-# We need a shared RNG to make sure all the distributed workers will skip the penalty together,
-# as otherwise we wouldn't get any speed up.
-penalty_rng = random.Random(1234)
-
-
-def svd_penalty(model, min_size=0.1, dim=1, niters=2, powm=False, convtr=True,
-                proba=1, conv_only=False, exact=False, bs=1):
-    """
-    Penalty on the largest singular value for a layer.
-    Args:
-        - model: model to penalize.
-        - min_size: minimum size in MB of a layer to penalize.
-        - dim: projection dimension for svd_lowrank. Higher is better but slower.
-        - niters: number of iterations in the algorithm used by svd_lowrank.
-        - powm: use the power method instead of lowrank SVD; in my own experience
-          it is both slower and less stable.
-        - convtr: when True, differentiate between Conv and Transposed Conv.
-          This is kept for compatibility with older experiments.
-        - proba: probability to apply the penalty.
-        - conv_only: only apply to conv and conv transposed, not LSTM
-          (might not be reliable for other models than Demucs).
-        - exact: use exact SVD (slow but useful at validation).
-        - bs: batch_size for the power method.
-    """
-    total = 0
-    if penalty_rng.random() > proba:
-        return 0.
-
-    for m in model.modules():
-        for name, p in m.named_parameters(recurse=False):
-            if p.numel() / 2**18 < min_size:
-                continue
-            if convtr:
-                if isinstance(m, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d)):
-                    if p.dim() in [3, 4]:
-                        p = p.transpose(0, 1).contiguous()
-            if p.dim() == 3:
-                p = p.view(len(p), -1)
-            elif p.dim() == 4:
-                p = p.view(len(p), -1)
-            elif p.dim() == 1:
-                continue
-            elif conv_only:
-                continue
-            assert p.dim() == 2, (name, p.shape)
-            if exact:
-                estimate = torch.svd(p, compute_uv=False)[1].pow(2).max()
-            elif powm:
-                a, b = p.shape
-                if a < b:
-                    n = p.mm(p.t())
-                else:
-                    n = p.t().mm(p)
-                estimate = power_iteration(n, niters, bs)
-            else:
-                estimate = torch.svd_lowrank(p, dim, niters)[1][0].pow(2)
-            total += estimate
-    return total / proba
diff --git a/demucs/utils.py b/demucs/utils.py
deleted file mode 100644
index 3f2afaa..0000000
--- a/demucs/utils.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import defaultdict
-from contextlib import contextmanager
-import math
-import os
-import tempfile
-import typing as tp
-
-import torch
-from torch.nn import functional as F
-
-
-def unfold(a, kernel_size, stride):
-    """Given input of size [*OT, T], output Tensor of size [*OT, F, K]
-    with K the kernel size, by extracting frames with the given stride.
-
-    This will pad the input so that `F = ceil(T / S)`, with S the stride.
-
-    see https://github.com/pytorch/pytorch/issues/60466
-    """
-    *shape, length = a.shape
-    n_frames = math.ceil(length / stride)
-    tgt_length = (n_frames - 1) * stride + kernel_size
-    a = F.pad(a, (0, tgt_length - length))
-    strides = list(a.stride())
-    assert strides[-1] == 1, 'data should be contiguous'
-    strides = strides[:-1] + [stride, 1]
-    return a.as_strided([*shape, n_frames, kernel_size], strides)
-
-
-def center_trim(tensor: torch.Tensor, reference: tp.Union[torch.Tensor, int]):
-    """
-    Center trim `tensor` with respect to `reference`, along the last dimension.
-    `reference` can also be a number, representing the length to trim to.
-    If the size difference != 0 mod 2, the extra sample is removed on the right side.
-    """
-    ref_size: int
-    if isinstance(reference, torch.Tensor):
-        ref_size = reference.size(-1)
-    else:
-        ref_size = reference
-    delta = tensor.size(-1) - ref_size
-    if delta < 0:
-        raise ValueError(f"tensor must be larger than reference. Delta is {delta}.")
-    if delta:
-        tensor = tensor[..., delta // 2:-(delta - delta // 2)]
-    return tensor
-
-
-def pull_metric(history: tp.List[dict], name: str):
-    out = []
-    for metrics in history:
-        metric = metrics
-        for part in name.split("."):
-            metric = metric[part]
-        out.append(metric)
-    return out
-
-
-def EMA(beta: float = 1):
-    """
-    Exponential Moving Average callback.
-    Returns a single function that can be called to repeatedly update the EMA
-    with a dict of metrics. The callback will return
-    the new averaged dict of metrics.
-
-    Note that for `beta=1`, this is just plain averaging.
- """ - fix: tp.Dict[str, float] = defaultdict(float) - total: tp.Dict[str, float] = defaultdict(float) - - def _update(metrics: dict, weight: float = 1) -> dict: - nonlocal total, fix - for key, value in metrics.items(): - total[key] = total[key] * beta + weight * float(value) - fix[key] = fix[key] * beta + weight - return {key: tot / fix[key] for key, tot in total.items()} - return _update - - -def sizeof_fmt(num: float, suffix: str = 'B'): - """ - Given `num` bytes, return human readable size. - Taken from https://stackoverflow.com/a/1094933 - """ - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) - - -@contextmanager -def temp_filenames(count: int, delete=True): - names = [] - try: - for _ in range(count): - names.append(tempfile.NamedTemporaryFile(delete=False).name) - yield names - finally: - if delete: - for name in names: - os.unlink(name) - - -class DummyPoolExecutor: - class DummyResult: - def __init__(self, func, *args, **kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - def result(self): - return self.func(*self.args, **self.kwargs) - - def __init__(self, workers=0): - pass - - def submit(self, func, *args, **kwargs): - return DummyPoolExecutor.DummyResult(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - return diff --git a/demucs/wav.py b/demucs/wav.py deleted file mode 100644 index 1c023a7..0000000 --- a/demucs/wav.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Loading wav based datasets, including MusdbHQ.""" - -from collections import OrderedDict -import hashlib -import math -import json -import os -from pathlib import Path -import tqdm - -import musdb -import julius -import torch as th -from torch import distributed -import torchaudio as ta -from torch.nn import functional as F - -from .audio import convert_audio_channels -from . import distrib - -MIXTURE = "mixture" -EXT = ".wav" - - -def _track_metadata(track, sources, normalize=True, ext=EXT): - track_length = None - track_samplerate = None - mean = 0 - std = 1 - for source in sources + [MIXTURE]: - file = track / f"{source}{ext}" - try: - info = ta.info(str(file)) - except RuntimeError: - print(file) - raise - length = info.num_frames - if track_length is None: - track_length = length - track_samplerate = info.sample_rate - elif track_length != length: - raise ValueError( - f"Invalid length for file {file}: " - f"expecting {track_length} but got {length}.") - elif info.sample_rate != track_samplerate: - raise ValueError( - f"Invalid sample rate for file {file}: " - f"expecting {track_samplerate} but got {info.sample_rate}.") - if source == MIXTURE and normalize: - try: - wav, _ = ta.load(str(file)) - except RuntimeError: - print(file) - raise - wav = wav.mean(0) - mean = wav.mean().item() - std = wav.std().item() - - return {"length": length, "mean": mean, "std": std, "samplerate": track_samplerate} - - -def build_metadata(path, sources, normalize=True, ext=EXT): - """ - Build the metadata for `Wavset`. - - Args: - path (str or Path): path to dataset. - sources (list[str]): list of sources to look for. - normalize (bool): if True, loads full track and store normalization - values based on the mixture file. 
-        ext (str): extension of audio files (default is .wav).
-    """
-
-    meta = {}
-    path = Path(path)
-    pendings = []
-    from concurrent.futures import ThreadPoolExecutor
-    with ThreadPoolExecutor(8) as pool:
-        for root, folders, files in os.walk(path, followlinks=True):
-            root = Path(root)
-            if root.name.startswith('.') or folders or root == path:
-                continue
-            name = str(root.relative_to(path))
-            pendings.append((name, pool.submit(_track_metadata, root, sources, normalize, ext)))
-            # meta[name] = _track_metadata(root, sources, normalize, ext)
-        for name, pending in tqdm.tqdm(pendings, ncols=120):
-            meta[name] = pending.result()
-    return meta
-
-
-class Wavset:
-    def __init__(
-            self,
-            root, metadata, sources,
-            segment=None, shift=None, normalize=True,
-            samplerate=44100, channels=2, ext=EXT):
-        """
-        Wavset (or mp3 set, for that matter). Can be used to train
-        with arbitrary sources. Each track should be one folder inside of `path`.
-        The folder should contain files named `{source}.{ext}`.
-
-        Args:
-            root (Path or str): root folder for the dataset.
-            metadata (dict): output from `build_metadata`.
-            sources (list[str]): list of source names.
-            segment (None or float): segment length in seconds. If `None`, returns entire tracks.
-            shift (None or float): stride in seconds between samples.
-            normalize (bool): normalizes input audio, **based on the metadata content**,
-                i.e. the entire track is normalized, not individual extracts.
-            samplerate (int): target sample rate. If the file sample rate
-                is different, it will be resampled on the fly.
-            channels (int): target number of channels. If different, will be
-                changed on the fly.
-            ext (str): extension for audio files (default is .wav).
-
-        samplerate and channels are converted on the fly.
-        """
-        self.root = Path(root)
-        self.metadata = OrderedDict(metadata)
-        self.segment = segment
-        self.shift = shift or segment
-        self.normalize = normalize
-        self.sources = sources
-        self.channels = channels
-        self.samplerate = samplerate
-        self.ext = ext
-        self.num_examples = []
-        for name, meta in self.metadata.items():
-            track_duration = meta['length'] / meta['samplerate']
-            if segment is None or track_duration < segment:
-                examples = 1
-            else:
-                examples = int(math.ceil((track_duration - self.segment) / self.shift) + 1)
-            self.num_examples.append(examples)
-
-    def __len__(self):
-        return sum(self.num_examples)
-
-    def get_file(self, name, source):
-        return self.root / name / f"{source}{self.ext}"
-
-    def __getitem__(self, index):
-        for name, examples in zip(self.metadata, self.num_examples):
-            if index >= examples:
-                index -= examples
-                continue
-            meta = self.metadata[name]
-            num_frames = -1
-            offset = 0
-            if self.segment is not None:
-                offset = int(meta['samplerate'] * self.shift * index)
-                num_frames = int(math.ceil(meta['samplerate'] * self.segment))
-            wavs = []
-            for source in self.sources:
-                file = self.get_file(name, source)
-                wav, _ = ta.load(str(file), frame_offset=offset, num_frames=num_frames)
-                wav = convert_audio_channels(wav, self.channels)
-                wavs.append(wav)
-
-            example = th.stack(wavs)
-            example = julius.resample_frac(example, meta['samplerate'], self.samplerate)
-            if self.normalize:
-                example = (example - meta['mean']) / meta['std']
-            if self.segment:
-                length = int(self.segment * self.samplerate)
-                example = example[..., :length]
-                example = F.pad(example, (0, length - example.shape[-1]))
-            return example
-
-
-def get_wav_datasets(args):
-    """Extract the wav datasets from the XP arguments."""
-    sig = hashlib.sha1(str(args.wav).encode()).hexdigest()[:8]
-    metadata_file = Path(args.metadata) / ('wav_' + sig + ".json")
-    train_path = Path(args.wav) / "train"
-    valid_path = Path(args.wav) / "valid"
-    if not metadata_file.is_file() and distrib.rank == 0:
-        metadata_file.parent.mkdir(exist_ok=True, parents=True)
-        train = build_metadata(train_path, args.sources)
-        valid = build_metadata(valid_path, args.sources)
-        json.dump([train, valid], open(metadata_file, "w"))
-    if distrib.world_size > 1:
-        distributed.barrier()
-    train, valid = json.load(open(metadata_file))
-    if args.full_cv:
-        kw_cv = {}
-    else:
-        kw_cv = {'segment': args.segment, 'shift': args.shift}
-    train_set = Wavset(train_path, train, args.sources,
-                       segment=args.segment, shift=args.shift,
-                       samplerate=args.samplerate, channels=args.channels,
-                       normalize=args.normalize)
-    valid_set = Wavset(valid_path, valid, [MIXTURE] + list(args.sources),
-                       samplerate=args.samplerate, channels=args.channels,
-                       normalize=args.normalize, **kw_cv)
-    return train_set, valid_set
-
-
-def _get_musdb_valid():
-    # Return the musdb valid set.
-    import yaml
-    setup_path = Path(musdb.__path__[0]) / 'configs' / 'mus.yaml'
-    setup = yaml.safe_load(open(setup_path, 'r'))
-    return setup['validation_tracks']
-
-
-def get_musdb_wav_datasets(args):
-    """Extract the musdb dataset from the XP arguments."""
-    sig = hashlib.sha1(str(args.musdb).encode()).hexdigest()[:8]
-    metadata_file = Path(args.metadata) / ('musdb_' + sig + ".json")
-    root = Path(args.musdb) / "train"
-    if not metadata_file.is_file() and distrib.rank == 0:
-        metadata_file.parent.mkdir(exist_ok=True, parents=True)
-        metadata = build_metadata(root, args.sources)
-        json.dump(metadata, open(metadata_file, "w"))
-    if distrib.world_size > 1:
-        distributed.barrier()
-    metadata = json.load(open(metadata_file))
-
-    valid_tracks = _get_musdb_valid()
-    if args.train_valid:
-        metadata_train = metadata
-    else:
-        metadata_train = {name: meta for name, meta in metadata.items() if name not in valid_tracks}
-    metadata_valid = {name: meta for name, meta in metadata.items() if name in valid_tracks}
-    if args.full_cv:
-        kw_cv = {}
-    else:
-        kw_cv = {'segment': args.segment, 'shift': args.shift}
-    train_set = Wavset(root, metadata_train, args.sources,
                       segment=args.segment, shift=args.shift,
-                       samplerate=args.samplerate, channels=args.channels,
-                       normalize=args.normalize)
-    valid_set = Wavset(root, metadata_valid, [MIXTURE] + list(args.sources),
-                       samplerate=args.samplerate, channels=args.channels,
-                       normalize=args.normalize, **kw_cv)
-    return train_set, valid_set
diff --git a/demucs/wdemucs.py b/demucs/wdemucs.py
deleted file mode 100644
index b0d799e..0000000
--- a/demucs/wdemucs.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-# For compatibility.
-from .hdemucs import HDemucs
-
-WDemucs = HDemucs
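
For readers auditing this deletion, the per-source weighting in the deleted solver.py reduces to a weighted mean over sources. A minimal numeric sketch; the four sources and their weights below are illustrative, not taken from the diff:

    import torch

    weights = torch.tensor([1., 1., 1., 2.])        # e.g. upweighting one source (illustrative)
    reco = torch.tensor([0.10, 0.12, 0.08, 0.20])   # per-source L1 reconstruction losses

    loss = (reco * weights).sum() / weights.sum()   # weighted mean over sources
    print(float(loss))                              # 0.14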
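The deleted spec.py pair is meant to round-trip: `ispectro` undoes `spectro` up to numerical error when given the matching hop length. A usage sketch, assuming a pre-deletion checkout of demucs is still importable:

    import torch as th
    from demucs.spec import spectro, ispectro   # assumes the deleted module is on the path

    x = th.randn(2, 2, 44100)                   # (batch, channels, time)
    z = spectro(x, n_fft=512)                   # complex (batch, channels, freqs, frames)
    y = ispectro(z, hop_length=512 // 4, length=x.shape[-1])
    print(z.shape, y.shape)                     # time length is preserved by `length=`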
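The deleted states.py defines a save/load contract: a model decorates its `__init__` with `capture_init` so `serialize_model` can record the constructor arguments, and `load_model` can rebuild the class before restoring the weights. A sketch under the same importability assumption; `TinyModel` is a stand-in, not part of the diff:

    import torch
    from omegaconf import OmegaConf
    from demucs import states                   # assumes the deleted module is on the path

    class TinyModel(torch.nn.Module):
        @states.capture_init                    # records (args, kwargs) on the instance
        def __init__(self, dim=4):
            super().__init__()
            self.linear = torch.nn.Linear(dim, dim)

    model = TinyModel(dim=8)
    pkg = states.serialize_model(model, OmegaConf.create({}))  # klass/args/kwargs/state
    clone = states.load_model(pkg)              # rebuilds the class, then loads weights
    assert isinstance(clone, TinyModel)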
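In the deleted svd.py, running `power_iteration` on `p.t().mm(p)` estimates the squared largest singular value of `p`, which is what `svd_penalty` computes exactly via `torch.svd` when `exact=True`. A quick sanity check, same assumption:

    import torch
    from demucs.svd import power_iteration      # assumes the deleted module is on the path

    p = torch.randn(64, 32)
    n = p.t().mm(p)                             # square PSD matrix, as built in svd_penalty
    est = power_iteration(n, niters=100, bs=4)
    exact = torch.svd(p, compute_uv=False)[1][0].pow(2)
    print(float(est), float(exact))             # the two estimates should agree closely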
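`unfold` in the deleted utils.py extracts strided frames and zero-pads so that the frame count is `ceil(T / stride)`. A small example, same assumption:

    import torch
    from demucs.utils import unfold             # assumes the deleted module is on the path

    a = torch.arange(10.).view(1, 10)
    frames = unfold(a, kernel_size=4, stride=2)
    print(frames.shape)                         # torch.Size([1, 5, 4]): ceil(10 / 2) frames
    print(frames[0, -1])                        # tensor([8., 9., 0., 0.]): zero-padded tail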
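`center_trim` removes the extra sample on the right when the length difference is odd, as its docstring states. For example:

    import torch
    from demucs.utils import center_trim       # assumes the deleted module is on the path

    x = torch.arange(7.)
    print(center_trim(x, 4))                    # tensor([1., 2., 3., 4.]); delta=3 removes
                                                # one sample on the left, two on the right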
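The `EMA` helper in the deleted utils.py returns a closure holding running totals; with the default `beta=1` it is a plain weighted average. For example:

    from demucs.utils import EMA                # assumes the deleted module is on the path

    averager = EMA()                            # beta=1: plain (weighted) averaging
    print(averager({'loss': 2.0}))              # {'loss': 2.0}
    print(averager({'loss': 1.0}))              # {'loss': 1.5}
    print(averager({'loss': 1.0}, weight=2))    # {'loss': 1.25} = (2 + 1 + 2*1) / (1 + 1 + 2)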
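`DummyPoolExecutor` mirrors the `concurrent.futures` executor interface but runs the work synchronously when `.result()` is called, which makes pool-based code easy to step through in a debugger. For example:

    from demucs.utils import DummyPoolExecutor  # assumes the deleted module is on the path

    with DummyPoolExecutor() as pool:
        fut = pool.submit(pow, 2, 10)
        print(fut.result())                     # 1024, computed lazily on .result()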
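Finally, the segment arithmetic in the deleted `Wavset.__init__` counts how many shifted windows fit in a track. A worked example with hypothetical durations:

    import math

    # Hypothetical track: 10 s long, segment=4 s, shift=2 s.
    track_duration, segment, shift = 10.0, 4.0, 2.0
    examples = int(math.ceil((track_duration - segment) / shift) + 1)
    print(examples)                             # 4 -> segments starting at 0, 2, 4 and 6 s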