From 585d58393666160c1097c422986d06d0c87f7b87 Mon Sep 17 00:00:00 2001
From: xkjiang-srfv <52552899+xkjiang-srfv@users.noreply.github.com>
Date: Wed, 1 Sep 2021 14:09:04 +0800
Subject: [PATCH] Add files via upload

---
 ActivationPrune.py                            | 139 ++++
 ActivationPrune.xlsx                          | Bin 0 -> 14675 bytes
 Conv2dNew.py                                  | 201 ++++++
 K_means.py                                    | 153 +++++
 Op.py                                         |  50 ++
 WeightPrune.py                                | 183 +++++
 main.py                                       |  39 ++
 model.py                                      | 632 ++++++++++++++++++
 train.py                                      | 289 ++++++++
 ...\346\236\235\350\257\264\346\230\216.vsdx" | Bin 0 -> 45464 bytes
 10 files changed, 1686 insertions(+)
 create mode 100644 ActivationPrune.py
 create mode 100644 ActivationPrune.xlsx
 create mode 100644 Conv2dNew.py
 create mode 100644 K_means.py
 create mode 100644 Op.py
 create mode 100644 WeightPrune.py
 create mode 100644 main.py
 create mode 100644 model.py
 create mode 100644 train.py
 create mode 100644 "\350\276\223\345\205\245\347\211\271\345\276\201\345\233\276\345\211\252\346\236\235\350\257\264\346\230\216.vsdx"

diff --git a/ActivationPrune.py b/ActivationPrune.py
new file mode 100644
index 0000000..f0a3c98
--- /dev/null
+++ b/ActivationPrune.py
@@ -0,0 +1,139 @@
+import copy
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Function
+from model import *
+from train import *
+import random
+# from .model import ResNetBasicBlock
+
+from math import sqrt
+from time import time
+from Conv2dNew import Execution
+
+
+class Conv2dTest(nn.Conv2d):
+    def __init__(self,
+                 ratio,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 bias=True,
+                 padding_mode='zeros',
+                 ):
+        super(Conv2dTest, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups,
+                                         bias, padding_mode)
+        self.ratio = ratio
+
+    def forward(self, input):
+        E = Execution(self.ratio)
+        output = E.conv2d(input, self.weight, self.bias, self.stride, self.padding)
+        return output
+
+class LinearTest(nn.Linear):
+    def __init__(self,
+                 in_features,
+                 out_features,
+                 bias=True,
+                 ):
+        super(LinearTest, self).__init__(in_features, out_features, bias)
+
+    def forward(self, input):
+        output = F.linear(input, self.weight, self.bias)
+        return output
+
+def prepare(model, ratio, inplace=False):
+    # recursively replace every Conv2d/Linear with its pruning-aware counterpart
+    def addActivationPruneOp(module):
+        nonlocal layer_cnt
+        for name, child in module.named_children():
+            if isinstance(child, nn.Conv2d):
+                activationPruneConv = Conv2dTest(
+                    ratio,
+                    child.in_channels,
+                    child.out_channels, child.kernel_size, stride=child.stride, padding=child.padding,
+                    dilation=child.dilation, groups=child.groups, bias=(child.bias is not None),
+                    padding_mode=child.padding_mode
+                )
+                if child.bias is not None:
+                    activationPruneConv.bias = child.bias
+                activationPruneConv.weight = child.weight
+                module._modules[name] = activationPruneConv
+                layer_cnt += 1
+            elif isinstance(child, nn.Linear):
+                activationPruneLinear = LinearTest(
+                    child.in_features, child.out_features,
+                    bias=(child.bias is not None)
+                )
+                if child.bias is not None:
+                    activationPruneLinear.bias = child.bias
+                activationPruneLinear.weight = child.weight
+                module._modules[name] = activationPruneLinear
+                layer_cnt += 1
+            else:
+                addActivationPruneOp(child)  # recurse into container modules; layers such as MaxPool are left unchanged
+    layer_cnt = 0
+    if not inplace:
+        model = copy.deepcopy(model)
+    addActivationPruneOp(model)  # add the input-feature-map pruning op to every conv (and linear) layer
+    return model
+
+def getModel(modelName):
+    if modelName == 'LeNet':
+        return getLeNet()  # load the base model architecture
+    elif modelName == 'AlexNet':
+        return getAlexnet()
+    elif modelName == 'VGG16':
+        return get_vgg16()
+    elif modelName == 'SqueezeNet':
+        return get_squeezenet()
+    elif modelName == 'ResNet':
+        return get_resnet18()
+    elif modelName == 'InceptionV3':
+        return get_inception_v3()
+    # if modelName == 'MobileNet':
+    #     return mobilenetv3_large()
+
+def getDataSet(modelName, batchSize, imgSize):
+    if modelName in ('VGG16', 'AlexNet', 'ResNet', 'SqueezeNet', 'InceptionV3'):
+        dataloaders, dataset_sizes = load_cifar10(batch_size=batchSize, pth_path='./data',
+                                                  img_size=imgSize)  # select the dataset
+    elif modelName == 'LeNet':
+        dataloaders, dataset_sizes = load_mnist(batch_size=batchSize, path='./data', img_size=imgSize)
+
+    return dataloaders, dataset_sizes
+
+def getPruneModel(model_name, weight_file_path, pattern, ratio):
+    model_orign = getModel(model_name)
+    if pattern == 'test' or pattern == 'retrain':
+        model_orign.load_state_dict(torch.load(weight_file_path))  # load the trained weights into the base architecture
+    activationPruneModel = prepare(model_orign, ratio)
+
+    return activationPruneModel
+
+def activationPruneModelOp(model_name, batch_size, img_size, pattern, ratio, epoch):
+    dataloaders, dataset_sizes = getDataSet(model_name, batch_size, img_size)
+    criterion = nn.CrossEntropyLoss()
+
+    if pattern == 'retrain' or pattern == 'train':
+        weight_file_path = './pth/' + model_name + '/ratio=0' + '/Activation' + '/best.pth'
+        activationPruneModel = getPruneModel(model_name, weight_file_path, pattern, ratio)
+        optimizer = optim.SGD(activationPruneModel.parameters(), lr=0.01, momentum=0.9)
+        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.8)  # learning-rate decay schedule
+        train_model_jiang(activationPruneModel, dataloaders, dataset_sizes, ratio, 'activation', pattern, criterion=criterion, optimizer=optimizer, name=model_name,
+                          scheduler=scheduler, num_epochs=epoch, rerun=False)  # train the model
+    if pattern == 'test':
+        weight_file_path = './pth/' + model_name + '/ratio=' + str(ratio) + '/Activation/' + 'best.pth'
+        activationPruneModel = getPruneModel(model_name, weight_file_path, pattern, ratio)
+        test_model(activationPruneModel, dataloaders, dataset_sizes, criterion=criterion)
+

diff --git a/ActivationPrune.xlsx b/ActivationPrune.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..7e31dfdc4b18bae6b9ab0906f2de0ad8ae82df9d
GIT binary patch
literal 14675
[base85-encoded spreadsheet payload omitted]
literal 0
HcmV?d00001
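For orientation, a minimal sketch (not part of the patch) of how prepare() from ActivationPrune.py is meant to be used: by default it deep-copies the model, then swaps every Conv2d for the ratio-aware Conv2dTest while reusing the trained parameters. The model and ratio below are arbitrary examples.

    import torch.nn as nn
    from ActivationPrune import prepare
    from model import getLeNet

    model = getLeNet()
    pruned = prepare(model, ratio=0.5)   # inplace=False by default, so `model` is untouched
    conv_types = {type(m).__name__ for m in pruned.modules() if isinstance(m, nn.Conv2d)}
    print(conv_types)                    # expected: {'Conv2dTest'}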

diff --git a/Conv2dNew.py b/Conv2dNew.py
new file mode 100644
index 0000000..b752f0a
--- /dev/null
+++ b/Conv2dNew.py
@@ -0,0 +1,201 @@
+import math
+import numpy as np
+import torch
+
+def determine_padding(filter_shape, output_shape="same"):
+    '''
+    Work out the (top, bottom) and (left, right) padding for a filter.
+    :param filter_shape: (filter_height, filter_width)
+    :param output_shape: "valid" (no padding), "same" (output size equals
+        input size at stride 1), or an explicit (pad_h, pad_w) pair
+    :return: ((pad_h1, pad_h2), (pad_w1, pad_w2))
+    '''
+    # No padding
+    if output_shape == "valid":
+        return (0, 0), (0, 0)
+    # Pad so that the output shape is the same as input shape (given that stride=1)
+    elif output_shape == "same":
+        filter_height, filter_width = filter_shape
+
+        # Derived from:
+        # output_height = (height + pad_h - filter_height) / stride + 1
+        # In this case output_height = height and stride = 1. This gives the
+        # expression for the padding below.
+        pad_h1 = int(math.floor((filter_height - 1)/2))
+        pad_h2 = int(math.ceil((filter_height - 1)/2))
+        pad_w1 = int(math.floor((filter_width - 1)/2))
+        pad_w2 = int(math.ceil((filter_width - 1)/2))
+    else:
+        pad_h1 = output_shape[0]
+        pad_h2 = output_shape[0]
+        pad_w1 = output_shape[1]
+        pad_w2 = output_shape[1]
+
+    return (pad_h1, pad_h2), (pad_w1, pad_w2)
+
+def image_to_column(images, filter_shape, stride, output_shape='same'):
+    filter_height, filter_width = filter_shape
+    pad_h, pad_w = determine_padding(filter_shape, output_shape)
+    # Add padding to the image
+    images_padded = torch.nn.functional.pad(images, [pad_w[0], pad_w[1], pad_h[0], pad_h[1]], mode='constant')
+    # Calculate the indices where the dot products are to be applied between weights
+    # and the image
+    k, i, j = get_im2col_indices(images.shape, filter_shape, (pad_h, pad_w), stride)
+
+    # Get content from image at those indices
+    cols = images_padded[:, k, i, j]
+    channels = images.shape[1]
+    # Reshape content into column shape
+    cols = cols.permute(1, 2, 0).reshape(filter_height * filter_width * channels, -1)
+
+    return cols
+
+def get_im2col_indices(images_shape, filter_shape, padding, stride=(1, 1)):  # stride: (H, W)
+    # First figure out what the size of the output should be
+    batch_size, channels, height, width = images_shape
+    filter_height, filter_width = filter_shape
+    pad_h, pad_w = padding
+    out_height = int((height + np.sum(pad_h) - filter_height) / stride[0] + 1)
+    out_width = int((width + np.sum(pad_w) - filter_width) / stride[1] + 1)
+
+    i0 = np.repeat(np.arange(filter_height), filter_width)
+    i0 = np.tile(i0, channels)
+    i1 = stride[0] * np.repeat(np.arange(out_height), out_width)
+    j0 = np.tile(np.arange(filter_width), filter_height * channels)
+    j1 = stride[1] * np.tile(np.arange(out_width), out_height)
+    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
+    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
+    k = np.repeat(np.arange(channels), filter_height * filter_width).reshape(-1, 1)
+    return (k, i, j)
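As a quick sanity check (not part of the patch), image_to_column with 'same' padding and stride 1 should match torch.nn.functional.unfold once both are brought to the same column layout; the shapes below are arbitrary, and the printed allclose is the verification rather than a guarantee.

    import torch
    import torch.nn.functional as F
    from Conv2dNew import image_to_column

    x = torch.randn(2, 3, 8, 8)
    cols = image_to_column(x, (3, 3), stride=(1, 1), output_shape='same')  # (C*3*3, H*W*N)
    ref = F.unfold(x, kernel_size=3, padding=1)                            # (N, C*3*3, H*W)
    ref = ref.permute(1, 2, 0).reshape(27, -1)
    print(cols.shape, torch.allclose(cols, ref))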
+
+class Layer(object):
+
+    def set_input_shape(self, shape):
+        """ Sets the shape that the layer expects of the input in the forward
+        pass method """
+        self.input_shape = shape
+
+    def layer_name(self):
+        """ The name of the layer. Used in model summary. """
+        return self.__class__.__name__
+
+    def parameters(self):
+        """ The number of trainable parameters used by the layer """
+        return 0
+
+    def forward_pass(self, X, training):
+        """ Propagates the signal forward in the network """
+        raise NotImplementedError()
+
+    def backward_pass(self, accum_grad):
+        """ Propagates the accumulated gradient backwards in the network.
+        If the layer has trainable weights, those weights are also tuned in this method.
+        As input (accum_grad) it receives the gradient with respect to the output of the layer and
+        returns the gradient with respect to the output of the previous layer. """
+        raise NotImplementedError()
+
+    def output_shape(self):
+        """ The shape of the output produced by forward_pass """
+        raise NotImplementedError()
+
+class Execution(Layer):
+    """Executes a 2D convolution, optionally pruning the im2col input columns.
+    Parameters:
+    -----------
+    n_filters: int
+        The number of filters that will convolve over the input matrix. The number of channels
+        of the output shape.
+    filter_shape: tuple
+        A tuple (filter_height, filter_width).
+    input_shape: tuple
+        The shape of the expected input of the layer. (batch_size, channels, height, width)
+        Only needs to be specified for the first layer in the network.
+    padding: string
+        Either 'same' or 'valid'. 'same' results in padding being added so that the output height and width
+        matches the input height and width. For 'valid' no padding is added.
+    stride: int
+        The stride length of the filters during the convolution over the input.
+    """
+    def __init__(self, ratio):
+        self.ratio = ratio
+
+    def conv2d(self, input, weight, bias, stride, padding):
+        self.input = input
+        self.weight = weight
+        self.bias = bias
+        self.stride = stride
+        self.padding = padding
+
+        self.n_filters = self.weight.shape[0]  # number of filters
+        self.filter_shape = (self.weight.shape[2], self.weight.shape[3])
+        self.input_shape = [self.input.shape[1], self.input.shape[2], self.input.shape[3]]
+        self.trainable = False
+
+        batch_size, channels, height, width = self.input.shape
+        # Turn image shape into column shape
+        # (enables dot product between input and weights)
+        self.X_col = image_to_column(self.input, self.filter_shape, stride=self.stride, output_shape=self.padding)
+        # Prune the input columns, then turn the weights into column shape
+        if self.ratio != 0:
+            # compareRatio = math.ceil(self.ratio * self.X_col.shape[1])
+            self.X_col = self.activationSlidePrune(self.X_col, self.ratio)
+        self.W_col = self.weight.reshape((self.n_filters, -1))
+        # Calculate output
+        if self.bias is not None:
+            output = torch.einsum('ij,jk->ik', self.W_col, self.X_col) + torch.unsqueeze(self.bias, 1)
+        else:
+            output = torch.einsum('ij,jk->ik', self.W_col, self.X_col)
+        # Reshape into (n_filters, out_height, out_width, batch_size)
+        output = output.reshape(self.output_shape() + (batch_size, ))
+        # Redistribute axes so that batch size comes first
+        return output.permute(3, 0, 1, 2)
+
+    def output_shape(self):
+        channels, height, width = self.input_shape
+        pad_h, pad_w = determine_padding(self.filter_shape, output_shape=self.padding)
+        output_height = (height + np.sum(pad_h) - self.filter_shape[0]) / self.stride[0] + 1
+        output_width = (width + np.sum(pad_w) - self.filter_shape[1]) / self.stride[1] + 1
+        return self.n_filters, int(output_height), int(output_width)
+
+    def parameters(self):
+        n = int(np.prod(tuple(self.weight.shape)))
+        if self.bias is not None:
+            n += int(np.prod(tuple(self.bias.shape)))
+        return n
+
+    def compressionRateStatistics(self, input, andSum, compareRatio):
+        pruneNumber = 0
+        zerosNumber = 0
+        for i in range(input.shape[1]):
+            if andSum[i] == 0:
+                zerosNumber += 1
+            if andSum[i] != 0 and andSum[i] <= compareRatio:
+                pruneNumber += 1
+        print('pruneNumberRatio=', pruneNumber / (input.shape[1]))
+        print('zerosNumberRatio=', zerosNumber / (input.shape[1]))
+
+    def accuracyTest(self, andSum):
+        for i in range(len(andSum)):
+            print(i, andSum[i])
+
+    def activationSlidePrune(self, input, ratio):
+        matrixOne = torch.ones(input.shape, device=input.device)  # all-ones matrix on the input's device
+
+        x = torch.clone(torch.detach(input))
+        andOp = torch.logical_and(matrixOne, x)  # AND: True wherever the activation is non-zero
+        andSum = torch.sum(andOp, dim=1)  # per-row count of non-zero entries
+
+        # self.compressionRateStatistics(input, andSum, compareRatio)
+        # self.accuracyTest(andSum)
+        p = (sum(andSum) // len(andSum)) * ratio  # threshold: ratio times the average non-zero count
+        x[(andSum <= p), ] = 0  # zero every row whose non-zero count falls at or below the threshold
+
+        return x
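A toy illustration (not part of the patch) of the thresholding rule in activationSlidePrune: rows of the im2col matrix with no more non-zeros than ratio times the average count are zeroed. The values are made up, and the CPU tensor relies on the device handling above.

    import torch
    from Conv2dNew import Execution

    cols = torch.tensor([[1., 0., 2., 0.],
                         [3., 4., 5., 6.],
                         [0., 0., 7., 0.]])
    pruned = Execution(ratio=1.0).activationSlidePrune(cols, 1.0)
    print(pruned)   # rows with <= the average non-zero count (here 2) are zeroed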
diff --git a/K_means.py b/K_means.py
new file mode 100644
index 0000000..2a754db
--- /dev/null
+++ b/K_means.py
@@ -0,0 +1,153 @@
+# k-means clustering over rank-based importance scores
+
+import random
+import pandas as pd
+import numpy as np
+import copy
+import math
+
+
+# Score every sample against every centroid
+def Dis(dataSet, centroids, k):
+    # Handle the centroids:
+    # if the previous pass produced fewer than k clusters, top up with random samples
+    if len(centroids) < k:
+        centroids = np.append(centroids, random.sample(list(dataSet), k-len(centroids)), axis=0)
+
+    # Handle the samples
+    clalist = []
+    for data in dataSet:
+        # np.tile(a, (k, 1)) stacks k copies of a row-wise, e.g. [0,1,2] -> [[0,1,2],[0,1,2]]
+        diff = np.tile(data, (k, 1))
+        mul_Diff = np.multiply(diff, centroids)
+        mul_Dist = np.sum(mul_Diff, axis=1)  # row-wise sum: the dot product with each centroid
+        clalist.append(mul_Dist)
+    clalist = np.array(clalist)  # len(dataSet) x k array of sample-to-centroid similarity scores
+    return clalist
+
+
+# Recompute the centroids
+def classify(dataSet, centroids, k):
+    # Score the samples against the centroids
+    clalist = Dis(dataSet, centroids, k)
+    # Assign each sample to a cluster and compute the new centroids
+    minDistIndices = np.argmax(clalist, axis=1)  # axis=1: index of the best-scoring centroid in each row
+    newCentroids = pd.DataFrame(dataSet).groupby(minDistIndices).mean()  # group the samples by cluster and average them
+    newCentroids = newCentroids.values
+
+    # Re-quantize each new centroid to the ranks 1..value_sum; otherwise fractional values would appear
+    for centro in newCentroids:
+        # centro is a 1-D vector
+        sorted_data = np.argsort(centro)  # rank order
+        value = 1
+        for valueIndex in sorted_data:
+            centro[valueIndex] = value
+            value += 1
+
+    # Measure the change
+    # (the new pass may have produced fewer than k clusters)
+    if len(newCentroids) != len(centroids):
+        changed = 1  # certainly changed
+    else:
+        changed = newCentroids - centroids  # possibly unchanged
+
+    return changed, newCentroids
+
+
+# Choose the initial centroids
+def euler_distance(point1: list, point2: list) -> float:
+    """
+    Score two points against each other. Despite the name, this is a dot
+    product rather than a Euclidean distance, matching the scoring in Dis().
+    """
+    distance = 0.0
+    for a, b in zip(point1, point2):
+        distance += a*b
+    return distance
+
+
+def get_closest_dist(point, centroids):
+    min_dist = math.inf  # start at infinity
+    for i, centroid in enumerate(centroids):
+        dist = euler_distance(centroid, point)
+        if dist < min_dist:
+            min_dist = dist
+    return min_dist
+
+
+def kpp_centers(data_set: list, k: int) -> list:
+    """
+    Return k objects from the data set to serve as the initial centroids
+    (k-means++-style roulette-wheel selection).
+    """
+    cluster_centers = []
+    cluster_centers.append(random.choice(data_set))
+    d = [0 for _ in range(len(data_set))]
+    for _ in range(1, k):
+        total = 0.0
+        for i, point in enumerate(data_set):
+            d[i] = get_closest_dist(point, cluster_centers)  # distance to the nearest existing center
+            total += d[i]
+        total *= random.random()
+        for i, di in enumerate(d):  # roulette-wheel selection of the next center
+            total -= di
+            if total > 0:
+                continue
+            cluster_centers.append(data_set[i])
+            break
+    return cluster_centers
+
+
+# Cluster with k-means
+def kmeans(dataSet, k):
+    # Preprocess dataSet into the importance (rank) matrix that the scoring uses
+    valueSet = np.zeros(dataSet.shape, dtype=int)  # initial matrix
+    for index in range(len(dataSet)):
+        data = dataSet[index]
+        value = valueSet[index]
+        sorted_data = list(map(abs, data))  # absolute values
+        sorted_data = np.argsort(sorted_data)  # rank order
+        i = 1  # the smaller the magnitude, the smaller the assigned rank
+        for valueIndex in sorted_data:
+            value[valueIndex] = i
+            i += 1
+
+    # Pick the initial centroids
+    # centroids = random.sample(dataSet, k)
+    centroids = kpp_centers(valueSet, k)
+
+    # Update the centroids until nothing changes, for at most 100 iterations
+    i = 100
+    changed, newCentroids = classify(valueSet, centroids, k)
+    while np.any(changed != 0) and i > 0:
+        changed, newCentroids = classify(valueSet, newCentroids, k)
+        i = i-1
+        print("iteration {}".format(100-i))
+
+    centroids = sorted(newCentroids.tolist())  # tolist() turns the array into a list; sorted() orders it
+
+    clalist = Dis(valueSet, centroids, k)
+    minDistIndices = np.argmax(clalist, axis=1)
+    return minDistIndices
+
+
+def getCluster(input, clusters_num):
+    # Cluster 4-D conv weights and 2-D fully-connected weights
+    if len(input.shape) == 2:  # fully-connected layer
+        fcValues = input.detach().cpu().numpy()  # convert to numpy
+        # input.shape[1] is the number of values in each clustering unit
+        clusterIndex = kmeans(fcValues, clusters_num)  # assign clusters
+    elif len(input.shape) == 4:  # conv layer
+        kernel_size = input.shape[3]  # kernel size
+        preShape = input.shape[:2]  # the first two of the four dimensions
+        inputCut = input.view(preShape[0]*preShape[1], kernel_size*kernel_size)  # flatten the 4-D weights to 2-D
+        convValues = inputCut.detach().cpu().numpy()  # convert to numpy
+        clusterIndex = kmeans(convValues, clusters_num)  # assign clusters
+        clusterIndex.resize(preShape)
+    else:
+        clusterIndex = None
+
+    return clusterIndex
\ No newline at end of file
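To make the rank-based "importance" representation concrete, here is a hand-computed example (not from the patch) of the transform kmeans() applies to each flattened kernel before clustering:

    import numpy as np

    kernel = np.array([0.7, -0.1, 0.4])        # one flattened kernel
    ranks = np.empty_like(kernel, dtype=int)
    ranks[np.argsort(np.abs(kernel))] = np.arange(1, kernel.size + 1)
    print(ranks)   # [3 1 2]: the largest |weight| gets the highest rank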
diff --git a/Op.py b/Op.py
new file mode 100644
index 0000000..2db2640
--- /dev/null
+++ b/Op.py
@@ -0,0 +1,50 @@
+from ActivationPrune import activationPruneModelOp
+from WeightPrune import weightPruneModelOp
+import os
+
+def makeDir(model_name, ratio):  # create the checkpoint folders
+    if not os.path.exists('./pth/' + model_name + '/ratio=' + str(ratio)):
+        os.makedirs('./pth/' + model_name + '/ratio=' + str(ratio) + '/Activation')
+        if ratio == 0:  # ratio=0 occurs in exactly two cases: training the initial model, and weight-only cluster pruning
+            os.makedirs('./pth/' + model_name + '/ratio=0/' + 'Weight')
+        else:
+            os.makedirs('./pth/' + model_name + '/ratio=' + str(ratio) + '/ActivationWeight')
+
+def Op(operation, model_name, batch_size, img_size, ratio, epochA, epochAW, weightParameter, LinearParameter):
+    if operation == 'trainInitialModel':  # train the initial model
+        patternA = 'train'
+        ratio = 0
+        makeDir(model_name, ratio)  # create the folders for the initial model
+        activationPruneModelOp(model_name, batch_size, img_size, patternA, ratio, epochA)
+
+    if operation == 'onlyActivationPruneWithRetrain':  # prune only the input feature maps, without weight cluster pruning
+        patternA = 'retrain'
+        makeDir(model_name, ratio)
+        activationPruneModelOp(model_name, batch_size, img_size, patternA, ratio, epochA)
+
+    if operation == 'onlyWeightPruneWithRetrain':  # note: this path still has a known bug
+        patternW = 'train'  # patternW='retrain' would instead load the initial model, cluster-prune its weights and retrain
+        ratio = 0
+        makeDir(model_name, ratio)
+        weightPruneModelOp(model_name, batch_size, img_size, ratio, patternW, epochAW, weightParameter, LinearParameter)
+
+    if operation == 'activationWeightPruneWithRetrain':
+        patternA = 'retrain'
+        patternW = 'retrain'  # patternW='retrain' loads the activation-pruned model, compresses it and retrains
+        makeDir(model_name, ratio)
+        activationPruneModelOp(model_name, batch_size, img_size, patternA, ratio, epochA)
+        weightPruneModelOp(model_name, batch_size, img_size, ratio, patternW, epochAW, weightParameter, LinearParameter)
+
+    if operation == 'onlyActivationPruneTest':
+        patternA = 'test'
+        makeDir(model_name, ratio)
+        activationPruneModelOp(model_name, batch_size, img_size, patternA, ratio, epochA)
+
+    if operation == 'activationWeightPruneTest':
+        patternW = 'test'
+        makeDir(model_name, ratio)
+        weightPruneModelOp(model_name, batch_size, img_size, ratio, patternW, epochAW, weightParameter, LinearParameter)
+
+    if operation == 'weightRetrainAfterActivationPrune':
+        patternW = 'retrain'
+        makeDir(model_name, ratio)
+        weightPruneModelOp(model_name, batch_size, img_size, ratio, patternW, epochAW, weightParameter, LinearParameter)
\ No newline at end of file
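A small usage sketch (not part of the patch; it assumes the folder nesting above) of the checkpoint layout makeDir produces, with the model name and ratio taken from main.py:

    from Op import makeDir

    makeDir('AlexNet', 0.95)
    # creates ./pth/AlexNet/ratio=0.95/Activation
    # and     ./pth/AlexNet/ratio=0.95/ActivationWeight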
diff --git a/WeightPrune.py b/WeightPrune.py
new file mode 100644
index 0000000..ab8a67e
--- /dev/null
+++ b/WeightPrune.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+import copy
+import torch
+import torchvision.transforms as transforms
+import torch.optim as optim
+from torch.utils.data import DataLoader
+import torch.nn.utils.prune as prune
+import pandas as pd
+import numpy as np
+from K_means import getCluster
+import torch.nn as nn
+from model import *
+from train import *
+from ActivationPrune import Conv2dTest, LinearTest
+from torch.nn.parameter import Parameter
+
+def scp_upgrade(kernel, old_scp):
+    old_scp += np.abs(kernel.cpu().detach().numpy())
+    return old_scp
+
+def scp_binarization(scps, C):
+    if len(scps.shape) == 3:
+        for r in np.arange(0, scps.shape[0]):
+            series = pd.Series(scps[r].ravel())
+            rank_info = series.rank()
+            for i in np.arange(0, scps[r].shape[0]):
+                for j in np.arange(0, scps[r].shape[1]):
+                    index = i*scps[r].shape[1]+j
+                    if rank_info[index] <= C:
+                        scps[r][i][j] = 0
+                    else:
+                        scps[r][i][j] = 1
+
+    elif len(scps.shape) == 2:
+        for r in np.arange(0, scps.shape[0]):
+            series = pd.Series(scps[r].ravel())
+            rank_info = series.rank()
+            for i in np.arange(0, scps[r].shape[0]):
+                index = i
+                if rank_info[index] <= C:
+                    scps[r][i] = 0
+                else:
+                    scps[r][i] = 1
+
+class PatternPruningMethod(prune.BasePruningMethod):
+    PRUNING_TYPE = "unstructured"
+
+    def __init__(self, clusters_num, cut_num, pruning_type):
+        self.clusters_num = clusters_num
+        self.cut_num = cut_num
+        self.pruning_type = pruning_type
+        prune.BasePruningMethod.__init__(self)
+
+    def compute_mask(self, t, default_mask):
+        mask = default_mask.clone()  # clone a mask the same size as this layer's filters
+        if self.pruning_type == 'conv':
+            scps = np.zeros(self.clusters_num*default_mask.shape[-1]*default_mask.shape[-1])  # one scp per cluster: the pattern of each kernel family
+            scps.resize(self.clusters_num, default_mask.shape[-1], default_mask.shape[-1])
+
+            clusters = getCluster(t, self.clusters_num)  # cluster this layer's kernels
+
+            print(clusters)
+
+            for i in np.arange(0, clusters.shape[0]):  # walk every kernel and accumulate its cluster's scp
+                for j in np.arange(0, clusters.shape[1]):
+                    scp_upgrade(t[i][j], scps[clusters[i][j]])
+
+            scp_binarization(scps, self.cut_num)  # binarize the scps into the actual patterns
+            print(scps)
+
+            for i in np.arange(0, clusters.shape[0]):  # build the final mask from each kernel's cluster pattern
+                for j in np.arange(0, clusters.shape[1]):
+                    mask[i][j] = torch.from_numpy(scps[clusters[i][j]])
+
+        elif self.pruning_type == 'full':
+
+            scps = np.zeros(self.clusters_num*default_mask.shape[-1])
+            scps.resize(self.clusters_num, default_mask.shape[-1])
+
+            clusters = getCluster(t, self.clusters_num)
+
+            print(clusters)
+
+            for i in np.arange(0, clusters.shape[0]):
+                scp_upgrade(t[i], scps[int(clusters[i])])
+
+            scp_binarization(scps, self.cut_num)  # binarize the scps into the actual patterns
+            print(scps)
+
+            for i in np.arange(0, clusters.shape[0]):  # build the final mask from each row's cluster pattern
+                mask[i] = torch.from_numpy(scps[int(clusters[i])])
+
+        return mask
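For orientation (not part of the patch), this is how torch's pruning reparametrization behaves once a mask like the ones above is applied; a hand-written 2x2 pattern and the built-in prune.custom_from_mask stand in for PatternPruningMethod here:

    import torch
    import torch.nn as nn
    import torch.nn.utils.prune as prune

    conv = nn.Conv2d(1, 1, kernel_size=2, bias=False)
    pattern = torch.tensor([[1., 0.], [0., 1.]]).expand(1, 1, 2, 2)
    prune.custom_from_mask(conv, 'weight', mask=pattern)
    print(conv.weight)                         # the masked view used in forward passes
    print(dict(conv.named_buffers()).keys())   # contains 'weight_mask'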
+def weightPrune(model, ratio, weightParameter, LinearParameter, inplace=False):
+    def activationWeightPruneOp(module):
+        for name, child in module.named_children():
+            if isinstance(child, nn.Conv2d):
+                print(child)
+                print(child.weight.shape)
+                cut = child.weight.shape[-1] * child.weight.shape[-2] / weightParameter
+                print('clusters_num=6', 'cut_num=', cut, 'pruning_type=conv')
+                PatternPruningMethod.apply(child, 'weight', 6, cut, 'conv')
+
+                # add input-feature-map pruning on top of the weight patterns
+                activationWeightPruneConv = Conv2dTest(
+                    ratio,
+                    child.in_channels,
+                    child.out_channels, child.kernel_size, stride=child.stride, padding=child.padding,
+                    dilation=child.dilation, groups=child.groups, bias=(child.bias is not None),
+                    padding_mode=child.padding_mode
+                )
+                if child.bias is not None:
+                    activationWeightPruneConv.bias = child.bias
+                # materialize the masked weights into the replacement module
+                activationWeightPruneConv.weight = Parameter(child.weight.detach().clone())
+                module._modules[name] = activationWeightPruneConv
+
+            elif isinstance(child, nn.Linear):
+                print(child)
+                print(child.weight.shape)
+                cut = child.weight.shape[-1] / LinearParameter
+                print('clusters_num=8', 'cut_num=', cut, 'pruning_type=full')
+                PatternPruningMethod.apply(child, 'weight', 8, cut, 'full')
+            else:
+                activationWeightPruneOp(child)  # recurse into container modules; layers such as MaxPool are left unchanged
+    if not inplace:
+        model = copy.deepcopy(model)
+    activationWeightPruneOp(model)  # add the pruning ops to every layer
+    return model
+
+def getModel(modelName):
+    if modelName == 'LeNet':
+        return getLeNet()  # load the base model architecture
+    elif modelName == 'AlexNet':
+        return getAlexnet()
+    elif modelName == 'VGG16':
+        return get_vgg16()
+    elif modelName == 'SqueezeNet':
+        return get_squeezenet()
+    elif modelName == 'ResNet':
+        return get_resnet18()
+
+def getDataSet(modelName, batchSize, imgSize):
+    if modelName in ('VGG16', 'AlexNet', 'ResNet', 'SqueezeNet'):
+        dataloaders, dataset_sizes = load_cifar10(batch_size=batchSize, pth_path='./data',
+                                                  img_size=imgSize)  # select the dataset
+    elif modelName == 'LeNet':
+        dataloaders, dataset_sizes = load_mnist(batch_size=batchSize, path='./data', img_size=imgSize)
+
+    return dataloaders, dataset_sizes
+
+def weightPruneModelOp(model_name, batch_size, img_size, ratio, pattern, epoch, weightParameter, LinearParameter):
+    net = getModel(model_name)  # build the model architecture
+    dataloaders, dataset_sizes = getDataSet(model_name, batch_size, img_size)  # load the dataset
+    criterion = nn.CrossEntropyLoss()
+    if pattern == 'retrain' or pattern == 'train':
+        if pattern == 'retrain':
+            getPth = './pth/' + model_name + '/ratio=' + str(ratio) + '/Activation' + '/best.pth'  # weights saved after input-feature-map pruning and retraining
+        else:
+            getPth = './pth/' + model_name + '/ratio=0' + '/Activation' + '/best.pth'
+        net.load_state_dict(torch.load(getPth))
+        net = weightPrune(net, ratio, weightParameter, LinearParameter)
+        optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
+        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.8)  # learning-rate decay schedule
+        train_model_jiang(net, dataloaders, dataset_sizes, ratio, 'weight', pattern, criterion=criterion, optimizer=optimizer, name=model_name,
+                          scheduler=scheduler, num_epochs=epoch, rerun=False)
+
+    if pattern == 'test':
+        getPth = './pth/' + model_name + '/ratio=' + str(ratio) + '/ActivationWeight/' + 'best.pth'
+        net = weightPrune(net, ratio, weightParameter, LinearParameter)
+        net.load_state_dict(torch.load(getPth))
+        test_model(net, dataloaders, dataset_sizes, criterion=criterion)
+
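A worked example of the pruning budget implied by the defaults in main.py (weightParameter = LinearParameter = 4/1); the 3x3 kernel is illustrative:

    kh = kw = 3
    weightParameter = 4 / 1
    cut_num = kh * kw / weightParameter   # 9 / 4 = 2.25
    # scp_binarization zeroes pattern entries whose rank is <= 2.25,
    # i.e. the two smallest-magnitude positions of each 3x3 pattern.
    print(cut_num)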
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..42890d8
--- /dev/null
+++ b/main.py
@@ -0,0 +1,39 @@
+from ActivationPrune import *
+from WeightPrune import weightPruneModelOp
+import os
+from Op import Op
+import torch
+torch.set_printoptions(threshold=5e3, edgeitems=15)
+
+if __name__ == '__main__':
+    model_name = 'AlexNet'  # model to run
+    batch_size = 32  # images per training batch
+    img_size = 227  # input image size
+    ratio = 0.95  # input-feature-map pruning ratio
+    epochA = 40  # epochs for input-feature-map-pruning retraining, or for training the initial (unpruned) model
+    epochAW = 40  # epochs for kernel-cluster-pruning retraining
+    weightParameter = (4/1)
+    LinearParameter = (4/1)
+    '''
+    Seven operations on the model are supported:
+    1. operation = 'trainInitialModel': train the initial model, with no pruning of any kind
+    2. operation = 'onlyActivationPruneWithRetrain': prune only the input feature maps, then retrain
+    3. operation = 'onlyWeightPruneWithRetrain': apply only weight cluster pruning, then retrain
+    4. operation = 'weightRetrainAfterActivationPrune': input-feature-map pruning has already been run and saved; now apply weight cluster pruning on top
+    5. operation = 'activationWeightPruneWithRetrain': prune the input feature maps and retrain, then cluster-prune the resulting weights and retrain again
+    6. operation = 'onlyActivationPruneTest': run inference with the activation-pruned model only, to measure its accuracy
+    7. operation = 'activationWeightPruneTest': run inference with the activation- and weight-pruned model, to measure its accuracy
+    '''
+    operation = 'trainInitialModel'
+    Op(operation, model_name, batch_size, img_size, ratio, epochA, epochAW, weightParameter, LinearParameter)
+    '''
+    Checkpoint directory layout:
+    -pth
+    --modelName
+    ---ratio=0
+    ----Activation: the initial model, with no pruning at all
+    ----Weight: the initial model after weight cluster pruning only
+    ---ratio=0.1
+    ----Activation: the model after input-feature-map pruning
+    ----ActivationWeight: the model after input-feature-map pruning followed by weight cluster pruning
+    '''
\ No newline at end of file
diff --git a/model.py b/model.py
new file mode 100644
index 0000000..7a56d8f
--- /dev/null
+++ b/model.py
@@ -0,0 +1,632 @@
+from collections import OrderedDict
+import torch.nn as nn
+import torch.nn.functional as F
+import math
+import torch
+
+class AlexNet(nn.Module):
+
+    def __init__(self, num_classes=10):
+        super(AlexNet, self).__init__()
+        self.features = nn.Sequential(
+            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(96, 256, kernel_size=5, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(256, 384, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(384, 384, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(384, 256, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+        )
+        self.classifier = nn.Sequential(
+            nn.Dropout(),
+            nn.Linear(256 * 6 * 6, 4096),
+            nn.ReLU(inplace=True),
+            nn.Dropout(),
+            nn.Linear(4096, 4096),
+            nn.ReLU(inplace=True),
+            nn.Linear(4096, num_classes),
+            # nn.Softmax()
+        )
+
+    def forward(self, x):
+        if hasattr(self, "first_input_prune"):
+            x = self.first_input_prune(x)
+        x = self.features(x)
+        x = x.view(x.size(0), 256 * 6 * 6)
+        x = self.classifier(x)
+        return x
+
+class LeNet(nn.Module):
+    def __init__(self, num_classes=10):
+        super(LeNet, self).__init__()
+        self.features = nn.Sequential(
+            nn.Conv2d(1, 6, kernel_size=5),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=2, stride=2),
+            nn.Conv2d(6, 16, kernel_size=5),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=2, stride=2),
+            nn.Conv2d(16, 120, kernel_size=5),
+            nn.ReLU(inplace=True)
+        )
+        self.classifier = nn.Sequential(
+            nn.Linear(120, 84),
+            nn.ReLU(inplace=True),
+            nn.Linear(84, num_classes)
+        )
+
+    def forward(self, x):
+        if hasattr(self, "first_input_prune"):
+            x = self.first_input_prune(x)
+        x = self.features(x)
+        x = x.view(x.size(0), -1)
+        x = self.classifier(x)
+        return x
+
+class VGG(nn.Module):
+
+    def __init__(self, features, num_classes=10):
+        super(VGG, self).__init__()
+        self.features = features
+        self.classifier = nn.Sequential(
+            nn.Linear(512 * 7 * 7, 4096),
+            nn.ReLU(inplace=True),
+            nn.Dropout(),
+            nn.Linear(4096, 4096),
+            nn.ReLU(inplace=True),
+            nn.Dropout(),
+            nn.Linear(4096, num_classes),
+        )
+        self._initialize_weights()
+
+    def forward(self, x):
+        if hasattr(self, "first_input_prune"):
+            x = self.first_input_prune(x)
+        x = self.features(x)
+        x = x.view(x.size(0), -1)
+        x = self.classifier(x)
+        return x
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                m.weight.data.normal_(0, math.sqrt(2. / n))
+                if m.bias is not None:
+                    m.bias.data.zero_()
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+            elif isinstance(m, nn.Linear):
+                n = m.weight.size(1)
+                m.weight.data.normal_(0, 0.01)
+                m.bias.data.zero_()
+
+
+class ResNet(nn.Module):
+    def __init__(self, block, layers, num_classes=10):
+        self.inplanes = 64
+        super(ResNet, self).__init__()
+
+        m = OrderedDict()
+        m['conv1'] = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
+        m['bn1'] = nn.BatchNorm2d(64)
+        m['relu1'] = nn.ReLU(inplace=True)
+        m['maxpool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.group1 = nn.Sequential(m)
+
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+
+        self.avgpool = nn.Sequential(nn.AvgPool2d(7))
+
+        self.group2 = nn.Sequential(
+            OrderedDict([
+                ('fc', nn.Linear(512 * block.expansion, num_classes))
+            ])
+        )
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                m.weight.data.normal_(0, math.sqrt(2. / n))
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.group1(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x = self.avgpool(x)
+        x = x.view(x.size(0), -1)
+        x = self.group2(x)
+
+        return x
+
+
+class ResNetBasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(ResNetBasicBlock, self).__init__()
+        m = OrderedDict()
+        m['conv1'] = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+        m['relu1'] = nn.ReLU(inplace=True)
+        m['conv2'] = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
+        self.group1 = nn.Sequential(m)
+        self.relu = nn.Sequential(nn.ReLU(inplace=True))
+        self.downsample = downsample
+
+    def forward(self, x):
+        if self.downsample is not None:
+            residual = self.downsample(x)
+        else:
+            residual = x
+        out = self.group1(x) + residual
+        out = self.relu(out)
+        return out
+
+class Fire(nn.Module):
+
+    def __init__(self, inplanes, squeeze_planes,
+                 expand1x1_planes, expand3x3_planes):
+        super(Fire, self).__init__()
+        self.inplanes = inplanes
+
+        self.group1 = nn.Sequential(
+            OrderedDict([
+                ('squeeze', nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)),
+                ('squeeze_activation', nn.ReLU(inplace=True))
+            ])
+        )
+
+        self.group2 = nn.Sequential(
+            OrderedDict([
+                ('expand1x1', nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)),
+                ('expand1x1_activation', nn.ReLU(inplace=True))
+            ])
+        )
+
+        self.group3 = nn.Sequential(
+            OrderedDict([
+                ('expand3x3', nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)),
+                ('expand3x3_activation', nn.ReLU(inplace=True))
+            ])
+        )
+
+    def forward(self, x):
+        x = self.group1(x)
+        return torch.cat([self.group2(x), self.group3(x)], 1)
+
+
+class SqueezeNet(nn.Module):
+
+    def __init__(self, num_classes=1000):
+        super(SqueezeNet, self).__init__()
+        self.num_classes = num_classes
+        self.features = nn.Sequential(
+            nn.Conv2d(3, 96, kernel_size=7, stride=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+            Fire(96, 16, 64, 64),
+            Fire(128, 16, 64, 64),
+            Fire(128, 32, 128, 128),
+            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+            Fire(256, 32, 128, 128),
+            Fire(256, 48, 192, 192),
+            Fire(384, 48, 192, 192),
+            Fire(384, 64, 256, 256),
+            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
+            Fire(512, 64, 256, 256),
+        )
+        # Final convolution is initialized differently from the rest
+        final_conv = nn.Conv2d(512, num_classes, kernel_size=1)
+        self.classifier = nn.Sequential(
+            nn.Dropout(p=0.5),
+            final_conv,
+            nn.ReLU(inplace=True),
+            nn.AvgPool2d(13)
+        )
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                gain = 2.0
+                if m is final_conv:
+                    m.weight.data.normal_(0, 0.01)
+                else:
+                    fan_in = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
+                    u = math.sqrt(3.0 * gain / fan_in)
+                    m.weight.data.uniform_(-u, u)
+                if m.bias is not None:
+                    m.bias.data.zero_()
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.classifier(x)
+        return x.view(x.size(0), self.num_classes)
+
+
+class Inception3(nn.Module):
+
+    def __init__(self, num_classes=1000, aux_logits=False, transform_input=False):
+        super(Inception3, self).__init__()
+        self.aux_logits = aux_logits
+        self.transform_input = transform_input
+        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
+        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
+        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
+        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
+        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
+        self.Mixed_5b = InceptionA(192, pool_features=32)
+        self.Mixed_5c = InceptionA(256, pool_features=64)
+        self.Mixed_5d = InceptionA(288, pool_features=64)
+        self.Mixed_6a = InceptionB(288)
+        self.Mixed_6b = InceptionC(768, channels_7x7=128)
+        self.Mixed_6c = InceptionC(768, channels_7x7=160)
+        self.Mixed_6d = InceptionC(768, channels_7x7=160)
+        self.Mixed_6e = InceptionC(768, channels_7x7=192)
+        if aux_logits:
+            self.AuxLogits = InceptionAux(768, num_classes)
+        self.Mixed_7a = InceptionD(768)
+        self.Mixed_7b = InceptionE(1280)
+        self.Mixed_7c = InceptionE(2048)
+        self.group1 = nn.Sequential(
+            OrderedDict([
+                ('fc', nn.Linear(2048, num_classes))
+            ])
+        )
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
+                import scipy.stats as stats
+                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
+                X = stats.truncnorm(-2, 2, scale=stddev)
+                values = torch.Tensor(X.rvs(m.weight.data.numel()))
+                m.weight.data.copy_(values.reshape(m.weight.shape))
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+    def forward(self, x):
+        if self.transform_input:
+            x = x.clone()
+            x[0] = x[0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
+            x[1] = x[1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
+            x[2] = x[2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
+        # 299 x 299 x 3
+        x = self.Conv2d_1a_3x3(x)
+        # 149 x 149 x 32
+        x = self.Conv2d_2a_3x3(x)
+        # 147 x 147 x 32
+        x = self.Conv2d_2b_3x3(x)
+        # 147 x 147 x 64
+        x = F.max_pool2d(x, kernel_size=3, stride=2)
+        # 73 x 73 x 64
+        x = self.Conv2d_3b_1x1(x)
+        # 73 x 73 x 80
+        x = self.Conv2d_4a_3x3(x)
+        # 71 x 71 x 192
+        x = F.max_pool2d(x, kernel_size=3, stride=2)
+        # 35 x 35 x 192
+        x = self.Mixed_5b(x)
+        # 35 x 35 x 256
+        x = self.Mixed_5c(x)
+        # 35 x 35 x 288
+        x = self.Mixed_5d(x)
+        # 35 x 35 x 288
+        x = self.Mixed_6a(x)
+        # 17 x 17 x 768
+        x = self.Mixed_6b(x)
+        # 17 x 17 x 768
+        x = self.Mixed_6c(x)
+        # 17 x 17 x 768
+        x = self.Mixed_6d(x)
+        # 17 x 17 x 768
+        x = self.Mixed_6e(x)
+        # 17 x 17 x 768
+        if self.training and self.aux_logits:
+            aux = self.AuxLogits(x)
+        # 17 x 17 x 768
+        x = self.Mixed_7a(x)
+        # 8 x 8 x 1280
+        x = self.Mixed_7b(x)
+        # 8 x 8 x 2048
+        x = self.Mixed_7c(x)
+        # 8 x 8 x 2048
+        x = F.avg_pool2d(x, kernel_size=8)
+        # 1 x 1 x 2048
+        x = F.dropout(x, training=self.training)
+        # 1 x 1 x 2048
+        x = x.view(x.size(0), -1)
+        # 2048
+        x = self.group1(x)
+        # 1000 (num_classes)
+        if self.training and self.aux_logits:
+            return x, aux
+        return x
+
+
+class InceptionA(nn.Module):
+
+    def __init__(self, in_channels, pool_features):
+        super(InceptionA, self).__init__()
+        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
+
+        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
+        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
+
+        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
+        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
+        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
+
+        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)
+
+    def forward(self, x):
+        branch1x1 = self.branch1x1(x)
+
+        branch5x5 = self.branch5x5_1(x)
+        branch5x5 = self.branch5x5_2(branch5x5)
+
+        branch3x3dbl = self.branch3x3dbl_1(x)
+        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
+        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
+
+        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+        branch_pool = self.branch_pool(branch_pool)
+
+        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionB(nn.Module):
+
+    def __init__(self, in_channels):
+        super(InceptionB, self).__init__()
+        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
+
+        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
+        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
+        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
+
+    def forward(self, x):
+        branch3x3 = self.branch3x3(x)
+
+        branch3x3dbl = self.branch3x3dbl_1(x)
+        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
+        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
+
+        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
+
+        outputs = [branch3x3, branch3x3dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionC(nn.Module):
+
+    def __init__(self, in_channels, channels_7x7):
+        super(InceptionC, self).__init__()
+        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
+
+        c7 = channels_7x7
+        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
+        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
+        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
+
+        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
+        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
+        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
+        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
+        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
+
+        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
+
+    def forward(self, x):
+        branch1x1 = self.branch1x1(x)
+
+        branch7x7 = self.branch7x7_1(x)
+        branch7x7 = self.branch7x7_2(branch7x7)
+        branch7x7 = self.branch7x7_3(branch7x7)
+
+        branch7x7dbl = self.branch7x7dbl_1(x)
+        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
+        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
+        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
+        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
+
+        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+        branch_pool = self.branch_pool(branch_pool)
+
+        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionD(nn.Module):
+
+    def __init__(self, in_channels):
+        super(InceptionD, self).__init__()
+        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
+        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
+
+        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
+        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
+        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
+        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
+
+    def forward(self, x):
+        branch3x3 = self.branch3x3_1(x)
+        branch3x3 = self.branch3x3_2(branch3x3)
+
+        branch7x7x3 = self.branch7x7x3_1(x)
+        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
+        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
+        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
+
+        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
+        outputs = [branch3x3, branch7x7x3, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionE(nn.Module):
+
+    def __init__(self, in_channels):
+        super(InceptionE, self).__init__()
+        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
+
+        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
+        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
+        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
+
+        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
+        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
+        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
+        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
+
+        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
+
+    def forward(self, x):
+        branch1x1 = self.branch1x1(x)
+
+        branch3x3 = self.branch3x3_1(x)
+        branch3x3 = [
+            self.branch3x3_2a(branch3x3),
+            self.branch3x3_2b(branch3x3),
+        ]
+        branch3x3 = torch.cat(branch3x3, 1)
+
+        branch3x3dbl = self.branch3x3dbl_1(x)
+        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
+        branch3x3dbl = [
+            self.branch3x3dbl_3a(branch3x3dbl),
+            self.branch3x3dbl_3b(branch3x3dbl),
+        ]
+        branch3x3dbl = torch.cat(branch3x3dbl, 1)
+
+        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+        branch_pool = self.branch_pool(branch_pool)
+
+        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionAux(nn.Module):
+
+    def __init__(self, in_channels, num_classes):
+        super(InceptionAux, self).__init__()
+        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
+        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
+        self.conv1.stddev = 0.01
+
+        fc = nn.Linear(768, num_classes)
+        fc.stddev = 0.001
+
+        self.group1 = nn.Sequential(
+            OrderedDict([
+                ('fc', fc)
+            ])
+        )
+
+    def forward(self, x):
+        # 17 x 17 x 768
+        x = F.avg_pool2d(x, kernel_size=5, stride=3)
+        # 5 x 5 x 768
+        x = self.conv0(x)
+        # 5 x 5 x 128
+        x = self.conv1(x)
+        # 1 x 1 x 768
+        x = x.view(x.size(0), -1)
+        # 768
+        x = self.group1(x)
+        # 1000
+        return x
+
+
+class BasicConv2d(nn.Module):
+
+    def __init__(self, in_channels, out_channels, **kwargs):
+        super(BasicConv2d, self).__init__()
+        self.group1 = nn.Sequential(
+            OrderedDict([
+                ('conv', nn.Conv2d(in_channels, out_channels, bias=False, **kwargs))
+                # ,('bn', nn.BatchNorm2d(out_channels, eps=0.001))
+            ])
+        )
+
+    def forward(self, x):
+        x = self.group1(x)
+        return F.relu(x, inplace=True)
+
+def vgg_make_layers(cfg, batch_norm=False):
+    layers = []
+    in_channels = 3
+    for v in cfg:
+        if v == 'M':
+            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+        else:
+            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
+            if batch_norm:
+                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
+            else:
+                layers += [conv2d, nn.ReLU(inplace=True)]
+            in_channels = v
+    return nn.Sequential(*layers)
+
+
+def getLeNet(num_classes=10):
+    model = LeNet(num_classes)
+    return model
+
+def getAlexnet(num_classes=10):
+    model = AlexNet(num_classes)
+    return model
+
+def get_vgg16(num_classes=10):
+    vgg16_setting = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
+    model = VGG(vgg_make_layers(vgg16_setting), num_classes)
+    return model
+
+
+def get_resnet18(num_classes=10):
+    model = ResNet(ResNetBasicBlock, [2, 2, 2, 2], num_classes)
+    return model
+
+
+def get_squeezenet(num_classes=10):
+    model = SqueezeNet(num_classes)
+    return model
+
+
+def get_inception_v3(num_classes=10):
+    model = Inception3(num_classes)
+    return model
+
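A quick sketch (not part of the patch) for sanity-checking the factory functions above; the parameter count printed is simply whatever the current architecture defines:

    from model import getAlexnet

    net = getAlexnet(num_classes=10)
    print(sum(p.numel() for p in net.parameters()))  # total trainable parameters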
+
+def download_cifar10(save_path):
+    torchvision.datasets.CIFAR10(root=save_path, train=True, download=True)
+    torchvision.datasets.CIFAR10(root=save_path, train=False, download=True)
+    return save_path
+
+def load_cifar10(batch_size=64, pth_path='./data', img_size=32):
+    if img_size != 32:
+        transform = transforms.Compose(
+            [transforms.Resize((img_size, img_size)),
+             transforms.ToTensor()])
+        test_transform = transforms.Compose(
+            [transforms.Resize((img_size, img_size)),
+             transforms.ToTensor()])
+    else:
+        transform = transforms.Compose([transforms.Pad(padding=4),
+                                        transforms.RandomCrop(32),
+                                        transforms.RandomHorizontalFlip(),
+                                        transforms.ToTensor()])
+        test_transform = transforms.Compose([transforms.ToTensor()])
+    trainset = torchvision.datasets.CIFAR10(root=pth_path, train=True, download=False, transform=transform)
+    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
+    testset = torchvision.datasets.CIFAR10(root=pth_path, train=False, download=False, transform=test_transform)
+    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
+    dataloaders = {"train": trainloader, "val": testloader}
+    dataset_sizes = {"train": 50000, "val": 10000}
+    return dataloaders, dataset_sizes
+
+def download_cifar100(save_path):
+    torchvision.datasets.CIFAR100(root=save_path, train=True, download=True)
+    # download=True here as well; the original passed download=False,
+    # which fails when the test split has never been downloaded.
+    torchvision.datasets.CIFAR100(root=save_path, train=False, download=True)
+    return save_path
+
+def load_cifar100(batch_size, pth_path, img_size):
+    if img_size != 32:
+        transform = transforms.Compose(
+            [transforms.Resize((img_size, img_size)),
+             transforms.ToTensor()])
+        test_transform = transforms.Compose(
+            [transforms.Resize((img_size, img_size)),
+             transforms.ToTensor()])
+    else:
+        transform = transforms.Compose([transforms.Pad(padding=4),
+                                        transforms.RandomCrop(32),
+                                        transforms.RandomHorizontalFlip(),
+                                        transforms.ToTensor()])
+        test_transform = transforms.Compose([transforms.ToTensor()])
+    trainset = torchvision.datasets.CIFAR100(root=pth_path, train=True, download=False, transform=transform)
+    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
+    testset = torchvision.datasets.CIFAR100(root=pth_path, train=False, download=False, transform=test_transform)
+    # Do not shuffle the validation loader (the original passed shuffle=True).
+    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
+    dataloaders = {"train": trainloader, "val": testloader}
+    dataset_sizes = {"train": 50000, "val": 10000}
+    return dataloaders, dataset_sizes
+
+def test_model(model, dataloaders, dataset_sizes, criterion):
+    print("validation model:")
+    phase = "val"
+    model.cuda()
+    model.eval()
+    with torch.no_grad():
+        running_loss = 0.0
+        running_acc = 0.0
+        for inputs, labels in tqdm(dataloaders[phase]):
+            inputs, labels = inputs.cuda(), labels.cuda()
+            outputs = model(inputs)
+            _, preds = torch.max(outputs, 1)
+            loss = criterion(outputs, labels)
+            running_loss += loss.item() * inputs.size(0)
+            running_acc += torch.sum(preds == labels.data)
+    epoch_loss = running_loss / dataset_sizes[phase]
+    epoch_acc = (running_acc / dataset_sizes[phase]).item()
+    print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
+    return epoch_acc, epoch_loss
+
+def WriteData(savePath, msg):
+    full_path = savePath + '/Accuracy.txt'  # a plain text log; a .doc file would also work
+    # Open with a context manager so the handle is always closed
+    # (the original left the close() call commented out).
+    with open(full_path, 'a') as file:
+        file.write(msg)  # msg is the text to append, e.g. an accuracy line
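+
+# Illustrative usage sketch (hypothetical helper; the log path is an
+# assumption): evaluates a model on the validation split and appends the
+# result to the Accuracy.txt log. Requires a CUDA device, since test_model
+# calls model.cuda(), and assumes the log directory already exists.
+def _demo_validation(model, dataloaders, dataset_sizes, log_dir='./pth/LeNet/ratio=0/Weight'):
+    criterion = nn.CrossEntropyLoss()
+    acc, loss = test_model(model, dataloaders, dataset_sizes, criterion)
+    WriteData(log_dir, 'val acc={:.4f} loss={:.4f}\n'.format(acc, loss))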
+
+def train_model_jiang(model, dataloaders, dataset_sizes, ratio, type, pattern, criterion, optimizer, name, scheduler=None, num_epochs=100, rerun=False):
+    """Train `model`, tracking the best validation checkpoint.
+
+    `type` and `pattern` select the checkpoint directory; rerun=True resumes
+    from ./test_20.pth at epoch 20 instead of training from scratch.
+    """
+    if rerun:
+        print('resuming from checkpoint')
+        print(num_epochs)
+        since = time.time()
+        model.load_state_dict(torch.load('./test_20.pth'))
+        best_model_wts = copy.deepcopy(model.state_dict())
+        best_acc = 0.0
+
+        model.cuda()
+        for epoch in range(20, num_epochs):
+            print('Epoch {}/{}'.format(epoch + 1, num_epochs))
+            print('-' * 10)
+            print('epoch %d lr: %f' % (epoch + 1, optimizer.param_groups[0]['lr']))
+
+            # Each epoch has a training and validation phase
+            for phase in ['train', 'val']:
+                if phase == 'train':
+                    model.train()  # Set model to training mode
+                else:
+                    print('val stage')
+                    model.eval()  # Set model to evaluate mode
+
+                running_loss = 0.0
+                running_corrects = 0
+
+                # Iterate over data.
+                i = 0
+                for data in dataloaders[phase]:
+                    inputs, labels = data
+                    inputs = inputs.cuda()
+                    labels = labels.cuda()
+
+                    # zero the parameter gradients
+                    optimizer.zero_grad()
+
+                    # forward; track history only in train
+                    with torch.set_grad_enabled(phase == 'train'):
+                        outputs = model(inputs)
+                        _, preds = torch.max(outputs, 1)
+                        loss = criterion(outputs, labels)
+                        print('[%d ,%5d] loss:%.3f' % (epoch + 1, i + 1, loss.item()))
+                        i += 1
+                        # backward + optimize only if in training phase
+                        if phase == 'train':
+                            loss.backward()
+                            optimizer.step()
+
+                    # statistics
+                    running_loss += loss.item() * inputs.size(0)
+                    running_corrects += torch.sum(preds == labels.data)
+                if phase == 'train' and scheduler is not None:
+                    scheduler.step()
+
+                epoch_loss = running_loss / dataset_sizes[phase]
+                epoch_acc = running_corrects.double() / dataset_sizes[phase]
+
+                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
+
+                # deep copy the model
+                if phase == 'val' and epoch_acc > best_acc:
+                    best_acc = epoch_acc
+                    best_model_wts = copy.deepcopy(model.state_dict())
+                    model.load_state_dict(best_model_wts)
+                    path = './test_{}.pth'.format(epoch + 1)
+                    torch.save(model.state_dict(), path)
+
+        time_elapsed = time.time() - since
+        print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
+        print('Best val Acc: {:4f}'.format(best_acc))
+
+        # load best model weights
+        model.load_state_dict(best_model_wts)
+        torch.save(model.state_dict(), './best.pth')
+
+    else:
+        since = time.time()
+        best_model_wts = copy.deepcopy(model.state_dict())
+        best_acc = 0.0
+        if type == 'activation':
+            savePth = './pth/' + name + '/ratio=' + str(ratio) + '/Activation'
+        else:
+            if pattern == 'retrain':
+                savePth = './pth/' + name + '/ratio=' + str(ratio) + '/ActivationWeight'
+            elif pattern == 'train':
+                savePth = './pth/' + name + '/ratio=0' + '/Weight'
+        model.cuda()
+        WriteData(savePth, 'ratio=' + str(ratio) + '\n')
+        for epoch in range(num_epochs):
+            print('Epoch {}/{}'.format(epoch + 1, num_epochs))
+            print('-' * 10)
+            print('epoch %d lr: %f' % (epoch + 1, optimizer.param_groups[0]['lr']))
+            # Each epoch has a training and validation phase
+            for phase in ['train', 'val']:
+                if phase == 'train':
+                    model.train()  # Set model to training mode
+                else:
+                    print('val stage')
+                    model.eval()  # Set model to evaluate mode
+                running_loss = 0.0
+                running_corrects = 0
+                # Iterate over data.
+                i = 0
+                for data in dataloaders[phase]:
+                    inputs, labels = data
+                    inputs = inputs.cuda()
+                    labels = labels.cuda()
+                    # zero the parameter gradients
+                    optimizer.zero_grad()
+                    # forward; track history only in train
+                    with torch.set_grad_enabled(phase == 'train'):
+                        outputs = model(inputs)
+                        _, preds = torch.max(outputs, 1)
+                        loss = criterion(outputs, labels)
+                        print('[%d ,%5d] loss:%.3f' % (epoch + 1, i + 1, loss.item()))
+                        i += 1
+                        # backward + optimize only if in training phase
+                        if phase == 'train':
+                            loss.backward()
+                            optimizer.step()
+                    # statistics
+                    running_loss += loss.item() * inputs.size(0)
+                    running_corrects += torch.sum(preds == labels.data)
+                if phase == 'train' and scheduler is not None:
+                    scheduler.step()
+                epoch_loss = running_loss / dataset_sizes[phase]
+                epoch_acc = running_corrects.double() / dataset_sizes[phase]
+                print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
+
+                # deep copy the model
+                if phase == 'val' and epoch_acc > best_acc:
+                    best_acc = epoch_acc
+                    best_model_wts = copy.deepcopy(model.state_dict())
+                    model.load_state_dict(best_model_wts)
+                    path = savePth + '/test_{}.pth'.format(epoch + 1)
+                    torch.save(model.state_dict(), path)
+                    # Scale to a percentage before rounding; the original rounded
+                    # first, which left float noise in the logged value.
+                    WriteData(savePth, str(round(float(epoch_acc) * 100, 4)) + '%-' + 'epoch=' + str(epoch + 1) + '\n')
+
+        time_elapsed = time.time() - since
+        print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
+        print('Best val Acc: {:4f}'.format(best_acc))
+
+        # load best model weights
+        model.load_state_dict(best_model_wts)
+        path = savePth + '/best.pth'
+        torch.save(model.state_dict(), path)
+    # Return the trained model from both branches (the original only
+    # returned it on the rerun == False path).
+    return model
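+
+# Illustrative end-to-end sketch (hypothetical helper; the paths and
+# hyperparameters are assumptions, and the ./pth/<name>/ratio=0/Weight
+# directory must already exist). Trains a CIFAR-10 model from scratch with
+# pattern='train', which writes checkpoints under that directory.
+def _demo_train(model, name='LeNet'):
+    dataloaders, dataset_sizes = load_cifar10(batch_size=64, pth_path='./data', img_size=32)
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
+    scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
+    return train_model_jiang(model, dataloaders, dataset_sizes, ratio=0, type='weight',
+                             pattern='train', criterion=criterion, optimizer=optimizer,
+                             name=name, scheduler=scheduler, num_epochs=100)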
diff --git "a/\350\276\223\345\205\245\347\211\271\345\276\201\345\233\276\345\211\252\346\236\235\350\257\264\346\230\216.vsdx" "b/\350\276\223\345\205\245\347\211\271\345\276\201\345\233\276\345\211\252\346\236\235\350\257\264\346\230\216.vsdx"
new file mode 100644
index 0000000000000000000000000000000000000000..af13b866e77528d2b1df80219ab8f11846d070c5
GIT binary patch
literal 45464
(base85-encoded binary payload omitted: a 45464-byte Visio diagram explaining the input feature-map pruning scheme)