We read every piece of feedback, and we take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent b8848b5 commit 5c7bfad
intel_extension_for_pytorch/quantization/_quantize.py
@@ -31,9 +31,9 @@ def prepare(
31
"""
32
assert isinstance(model, torch.nn.Module), "Only support nn.Module prepare for quantization path"
33
# auto model channels_last memory format conversion
34
- from ..frontend import auto_channels_last, _convert_convNd_weight_memory_format
+ from ..frontend import auto_channels_last, _convert_convNd_deconvNd_weight_memory_format
35
if auto_channels_last:
36
- _convert_convNd_weight_memory_format(model)
+ _convert_convNd_deconvNd_weight_memory_format(model)
37
try:
38
prepare_model = optimization.fuse(model, inplace=inplace)
39
prepare_model = linear_bn_fuse(prepare_model, inplace=inplace)
0 commit comments