@@ -205,7 +205,14 @@ def forward(self, x):
205205
206206
207207class InvertedResidual (nn .Module ):
208- """ Inverted residual block w/ optional SE and CondConv routing"""
208+ """ Inverted residual block w/ optional SE
209+
210+ Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
211+ referred to as 'MBConv', short for Mobile Inverted Bottleneck Conv, and is also used in
212+ * MNasNet - https://arxiv.org/abs/1807.11626
213+ * EfficientNet - https://arxiv.org/abs/1905.11946
214+ * MobileNet-V3 - https://arxiv.org/abs/1905.02244
215+ """
209216
210217 def __init__ (self , in_chs , out_chs , dw_kernel_size = 3 ,
211218 stride = 1 , dilation = 1 , pad_type = '' , act_layer = nn .ReLU , noskip = False ,
@@ -333,7 +340,16 @@ def forward(self, x):
333340
334341
335342class EdgeResidual (nn .Module ):
336- """ Residual block with expansion convolution followed by pointwise-linear w/ stride"""
343+ """ Residual block with expansion convolution followed by pointwise-linear w/ stride
344+
345+ Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`
346+ - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
347+
348+ This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers
349+ * MobileDet - https://arxiv.org/abs/2004.14525
350+ * EfficientNet-X - https://arxiv.org/abs/2102.05610
351+ * EfficientNet-V2 - https://arxiv.org/abs/2104.00298
352+ """
337353
338354 def __init__ (self , in_chs , out_chs , exp_kernel_size = 3 , exp_ratio = 1.0 , fake_in_chs = 0 ,
339355 stride = 1 , dilation = 1 , pad_type = '' , act_layer = nn .ReLU , noskip = False , pw_kernel_size = 1 ,
0 commit comments