
Commit a67b6b8

Fix naming convention in quantizer
Differential Revision: D72594670
Pull Request resolved: #9941
1 parent: 1e97232

1 file changed: backends/cadence/aot/quantizer/quantizer.py (+22, -22 lines)

@@ -43,7 +43,7 @@
 from torch.ao.quantization.quantizer.composable_quantizer import ComposableQuantizer
 
 
-act_qspec_asym8u = QuantizationSpec(
+act_qspec_asym8s = QuantizationSpec(
     dtype=torch.int8,
     quant_min=-128,
     quant_max=127,
@@ -52,7 +52,7 @@
     observer_or_fake_quant_ctr=HistogramObserver.with_args(eps=2**-12),
 )
 
-wgt_qspec_asym8u = QuantizationSpec(
+wgt_qspec_asym8s = QuantizationSpec(
     dtype=torch.int8,
     quant_min=-128,
     quant_max=127,
@@ -61,7 +61,7 @@
     observer_or_fake_quant_ctr=MinMaxObserver,
 )
 
-wgt_qspec_asym8s = QuantizationSpec(
+wgt_qspec_sym8s = QuantizationSpec(
     dtype=torch.int8,
     quant_min=-128,
     quant_max=127,
@@ -72,17 +72,17 @@
 
 bias_qspec: Optional[QuantizationSpec] = None
 
-qconfig_A8uW8u = QuantizationConfig(
-    act_qspec_asym8u,
-    act_qspec_asym8u,
-    wgt_qspec_asym8u,
+qconfig_A8W8 = QuantizationConfig(
+    act_qspec_asym8s,
+    act_qspec_asym8s,
+    wgt_qspec_asym8s,
     None,
 )
 
-qconfig_A8uW8s = QuantizationConfig(
-    act_qspec_asym8u,
-    act_qspec_asym8u,
-    wgt_qspec_asym8s,
+qconfig_A8W8sym = QuantizationConfig(
+    act_qspec_asym8s,
+    act_qspec_asym8s,
+    wgt_qspec_sym8s,
     None,
 )
 
@@ -189,15 +189,15 @@ def get_supported_operators(cls) -> List[OperatorConfig]:
 
 def get_cadence_default_quantizers() -> List[Quantizer]:
     return [
-        CadenceAtenQuantizer(AddmmPattern(), qconfig_A8uW8u),
-        CadenceAtenQuantizer(BmmPattern(), qconfig_A8uW8u),
-        CadenceAtenQuantizer(Conv1dPattern(), qconfig_A8uW8s),
-        CadenceAtenQuantizer(Conv2dPattern(), qconfig_A8uW8s),
-        CadenceAtenQuantizer(LayerNormPattern(), qconfig_A8uW8u),
-        CadenceAtenQuantizer(LinearPattern(), qconfig_A8uW8u),
-        CadenceAtenQuantizer(MatmulPattern(), qconfig_A8uW8u),
-        CadenceAtenQuantizer(ReluPattern0(), qconfig_A8uW8u),
-        CadenceAtenQuantizer(ReluPattern1(), qconfig_A8uW8u),
+        CadenceAtenQuantizer(AddmmPattern(), qconfig_A8W8),
+        CadenceAtenQuantizer(BmmPattern(), qconfig_A8W8),
+        CadenceAtenQuantizer(Conv1dPattern(), qconfig_A8W8sym),
+        CadenceAtenQuantizer(Conv2dPattern(), qconfig_A8W8sym),
+        CadenceAtenQuantizer(LayerNormPattern(), qconfig_A8W8),
+        CadenceAtenQuantizer(LinearPattern(), qconfig_A8W8),
+        CadenceAtenQuantizer(MatmulPattern(), qconfig_A8W8),
+        CadenceAtenQuantizer(ReluPattern0(), qconfig_A8W8),
+        CadenceAtenQuantizer(ReluPattern1(), qconfig_A8W8),
     ]
 
 
@@ -244,6 +244,6 @@ class CadenceWakeWordQuantizer(CadenceQuantizer):
     def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
         if quantizers is None:
             quantizers = get_cadence_default_quantizers()
-        quantizers.append(CadenceAtenQuantizer(AddPattern(), qconfig_A8uW8u))
-        quantizers.append(CadenceAtenQuantizer(CatPattern(), qconfig_A8uW8u))
+        quantizers.append(CadenceAtenQuantizer(AddPattern(), qconfig_A8W8))
+        quantizers.append(CadenceAtenQuantizer(CatPattern(), qconfig_A8W8))
         super().__init__(quantizers)
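For readers skimming the rename: below is a minimal sketch of what the new suffixes are taken to mean, assuming asym8s denotes an asymmetric signed 8-bit spec and sym8s a symmetric one. The qscheme values are illustrative assumptions and do not appear in this diff; only the dtype, range, and observers mirror the file above.

import torch
from torch.ao.quantization.observer import HistogramObserver, MinMaxObserver
from torch.ao.quantization.quantizer import QuantizationSpec

# Activation spec in the spirit of act_qspec_asym8s: signed int8, full range.
act_qspec_asym8s = QuantizationSpec(
    dtype=torch.int8,
    quant_min=-128,
    quant_max=127,
    qscheme=torch.per_tensor_affine,  # assumed asymmetric per-tensor scheme
    observer_or_fake_quant_ctr=HistogramObserver.with_args(eps=2**-12),
)

# Weight spec in the spirit of wgt_qspec_sym8s: same range, symmetric scheme.
wgt_qspec_sym8s = QuantizationSpec(
    dtype=torch.int8,
    quant_min=-128,
    quant_max=127,
    qscheme=torch.per_tensor_symmetric,  # assumed symmetric per-tensor scheme
    observer_or_fake_quant_ctr=MinMaxObserver,
)

With specs like these, a config such as qconfig_A8W8sym pairs the asymmetric activation spec with the symmetric weight spec, which is exactly what the new A8W8sym name signals.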
