Skip to content

Commit d4dd799

Browse files
jerryzh168 authored and facebook-github-bot committed
[quant][pt2e] Rename _pt2e to pt2e (pytorch#104668)
Summary: Pull Request resolved: pytorch#104668 X-link: pytorch/executorch#3 att Test Plan: Imported from OSS Reviewed By: andrewor14 Differential Revision: D47202807 fbshipit-source-id: 17a68fed962b6520c8d12ea58053ff2ee2017c30
1 parent 1a66163 commit d4dd799

22 files changed

+26
-26
lines changed

test/inductor/test_inductor_freezing.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,13 @@
1212
import torch
1313

1414
import torch._dynamo as torchdynamo
15-
import torch.ao.quantization._pt2e.quantizer.x86_inductor_quantizer as xiq
15+
import torch.ao.quantization.pt2e.quantizer.x86_inductor_quantizer as xiq
1616
from torch import nn
1717
from torch._inductor import config
1818
from torch._inductor.compile_fx import compile_fx
1919
from torch._inductor.utils import override_lowering, run_and_get_code
20-
from torch.ao.quantization._pt2e.quantizer import X86InductorQuantizer
2120
from torch.ao.quantization._quantize_pt2e import convert_pt2e, prepare_pt2e_quantizer
21+
from torch.ao.quantization.pt2e.quantizer import X86InductorQuantizer
2222
from torch.testing import FileCheck
2323
from torch.testing._internal.common_quantization import (
2424
skipIfNoDynamoSupport,

test/quantization/pt2e/test_quantize_pt2e.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
ObserverOrFakeQuantize,
1616
QConfigMapping,
1717
)
18-
from torch.ao.quantization._pt2e.quantizer import (
18+
from torch.ao.quantization.pt2e.quantizer import (
1919
ComposableQuantizer,
2020
DerivedQuantizationSpec,
2121
EmbeddingQuantizer,
@@ -27,10 +27,10 @@
2727
Quantizer,
2828
SharedQuantizationSpec,
2929
)
30-
from torch.ao.quantization._pt2e.quantizer.composable_quantizer import ( # noqa: F811
30+
from torch.ao.quantization.pt2e.quantizer.composable_quantizer import ( # noqa: F811
3131
ComposableQuantizer,
3232
)
33-
from torch.ao.quantization._pt2e.quantizer.qnnpack_quantizer import (
33+
from torch.ao.quantization.pt2e.quantizer.qnnpack_quantizer import (
3434
get_symmetric_quantization_config,
3535
)
3636
from torch.ao.quantization._quantize_pt2e import (

test/quantization/pt2e/test_x86inductor_quantizer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import torch
44
import torch._dynamo as torchdynamo
55
import torch.nn as nn
6-
from torch.ao.quantization._pt2e.quantizer import (
6+
from torch.ao.quantization.pt2e.quantizer import (
77
X86InductorQuantizer,
88
)
99
from torch.ao.quantization._quantize_pt2e import (
@@ -19,7 +19,7 @@
1919
from torch.testing._internal.common_quantized import override_quantized_engine
2020
from enum import Enum
2121
import itertools
22-
import torch.ao.quantization._pt2e.quantizer.x86_inductor_quantizer as xiq
22+
import torch.ao.quantization.pt2e.quantizer.x86_inductor_quantizer as xiq
2323
from torch.testing._internal.common_utils import skip_but_pass_in_sandcastle
2424

2525

torch/_dynamo/skipfiles.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -139,9 +139,9 @@ def _module_dir(m: types.ModuleType):
139139
# TODO: find a better way to express this path without having to import
140140
# `torch.ao.quantization._pt2e`, which interferes with memory profiling
141141
FILENAME_ALLOWLIST |= {
142-
_module_dir(torch) + "ao/quantization/_pt2e/qat_utils.py",
143-
_module_dir(torch) + "ao/quantization/_pt2e/quantizer/qnnpack_quantizer.py",
144-
_module_dir(torch) + "ao/quantization/_pt2e/representation/rewrite.py",
142+
_module_dir(torch) + "ao/quantization/pt2e/qat_utils.py",
143+
_module_dir(torch) + "ao/quantization/pt2e/quantizer/qnnpack_quantizer.py",
144+
_module_dir(torch) + "ao/quantization/pt2e/representation/rewrite.py",
145145
}
146146

147147
# TODO (zhxchen17) Make exportdb importable here.

torch/_inductor/freezing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from torch._inductor.compile_fx import fake_tensor_prop
1414
from torch._inductor.fx_passes.freezing_patterns import freezing_passes
1515
from torch._inductor.fx_passes.post_grad import view_to_reshape
16-
from torch.ao.quantization._pt2e.utils import _fuse_conv_bn_
16+
from torch.ao.quantization.pt2e.utils import _fuse_conv_bn_
1717
from torch.fx.experimental.proxy_tensor import make_fx
1818
from . import config
1919
from .decomposition import select_decomp_table

torch/ao/quantization/_quantize_pt2e.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,21 @@
11
from torch.fx import GraphModule
22

3-
from ._pt2e.prepare import prepare
4-
from ._pt2e._propagate_annotation import propagate_annotation
5-
from ._pt2e.qat_utils import (
3+
from .pt2e.prepare import prepare
4+
from .pt2e._propagate_annotation import propagate_annotation
5+
from .pt2e.qat_utils import (
66
_fuse_conv_bn_qat,
77
_fold_conv_bn_qat,
88
)
9-
from ._pt2e.utils import (
9+
from .pt2e.utils import (
1010
_get_node_name_to_scope,
1111
_fuse_conv_bn_,
1212
_rearrange_weight_observer_for_decomposed_linear,
1313
)
14-
from ._pt2e.representation import reference_representation_rewrite
14+
from .pt2e.representation import reference_representation_rewrite
1515
from .fx.prepare import prepare as fx_prepare
1616
from .quantize_fx import _convert_to_reference_decomposed_fx
1717
from torch.ao.quantization import QConfigMapping
18-
from torch.ao.quantization._pt2e.quantizer import Quantizer
18+
from torch.ao.quantization.pt2e.quantizer import Quantizer
1919
from torch.ao.quantization.backend_config import BackendConfig
2020

2121
from typing import Any, Tuple

torch/ao/quantization/fx/prepare.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@
106106
PrepareCustomConfig,
107107
StandaloneModuleConfigEntry,
108108
)
109-
from torch.ao.quantization._pt2e.quantizer import (
109+
from torch.ao.quantization.pt2e.quantizer import (
110110
EdgeOrNode,
111111
QuantizationSpec,
112112
FixedQParamsQuantizationSpec,

torch/ao/quantization/_pt2e/_propagate_annotation.py renamed to torch/ao/quantization/pt2e/_propagate_annotation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import Callable
22

33
import torch
4-
from torch.ao.quantization._pt2e.quantizer import (
4+
from torch.ao.quantization.pt2e.quantizer import (
55
QuantizationAnnotation,
66
SharedQuantizationSpec,
77
)

0 commit comments

Comments
 (0)