@@ -64,6 +64,54 @@ func.func @test_quantizelinear_f8(%arg0: !torch.vtensor<[6],f32>, %arg1: !torch.
64
64
65
65
// -----
66
66
67
// Per-channel QuantizeLinear with si8 zero-point lowers to
// torch.aten.quantize_per_channel with dtype 12 (torch qint8) on axis 1,
// followed by int_repr to recover the raw si8 tensor.
// CHECK-LABEL: @test_quantizelinear_per_channel_si8
func.func @test_quantizelinear_per_channel_si8(%arg0: !torch.vtensor<[4,3,7,7],f32>, %arg1: !torch.vtensor<[4],f32>, %arg2: !torch.vtensor<[4],si8>) -> !torch.vtensor<[4,3,7,7],si8> attributes {torch.onnx_meta.ir_version = 10 : si64, torch.onnx_meta.opset_version = 19 : si64} {
  // CHECK: %[[DTYPE:.+]] = torch.constant.int 12
  // CHECK: %[[AXIS:.+]] = torch.constant.int 1
  // CHECK: %[[QUANT:.+]] = torch.aten.quantize_per_channel %arg0, %arg1, %arg2, %[[AXIS]], %[[DTYPE]]
  // CHECK: %[[REPR:.+]] = torch.aten.int_repr %[[QUANT]]
  %0 = torch.operator "onnx.QuantizeLinear"(%arg0, %arg1, %arg2) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[4,3,7,7],f32>, !torch.vtensor<[4],f32>, !torch.vtensor<[4],si8>) -> !torch.vtensor<[4,3,7,7],si8>
  return %0 : !torch.vtensor<[4,3,7,7],si8>
}

// -----
78
+
79
// Per-channel QuantizeLinear with ui8 zero-point lowers to
// torch.aten.quantize_per_channel with dtype 13 (torch quint8) on axis 1,
// followed by int_repr to recover the raw ui8 tensor.
// CHECK-LABEL: @test_quantizelinear_per_channel_ui8
func.func @test_quantizelinear_per_channel_ui8(%arg0: !torch.vtensor<[4,3,7,7],f32>, %arg1: !torch.vtensor<[4],f32>, %arg2: !torch.vtensor<[4],ui8>) -> !torch.vtensor<[4,3,7,7],ui8> attributes {torch.onnx_meta.ir_version = 10 : si64, torch.onnx_meta.opset_version = 19 : si64} {
  // CHECK: %[[DTYPE:.+]] = torch.constant.int 13
  // CHECK: %[[AXIS:.+]] = torch.constant.int 1
  // CHECK: %[[QUANT:.+]] = torch.aten.quantize_per_channel %arg0, %arg1, %arg2, %[[AXIS]], %[[DTYPE]]
  // CHECK: %[[REPR:.+]] = torch.aten.int_repr %[[QUANT]]
  %0 = torch.operator "onnx.QuantizeLinear"(%arg0, %arg1, %arg2) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[4,3,7,7],f32>, !torch.vtensor<[4],f32>, !torch.vtensor<[4],ui8>) -> !torch.vtensor<[4,3,7,7],ui8>
  return %0 : !torch.vtensor<[4,3,7,7],ui8>
}

// -----
90
+
91
// Per-channel QuantizeLinear with si16 zero-point lowers to
// torch.aten.quantize_per_channel with dtype 27 (torch-mlir qint16) on axis 1,
// followed by int_repr to recover the raw si16 tensor.
// CHECK-LABEL: @test_quantizelinear_per_channel_si16
func.func @test_quantizelinear_per_channel_si16(%arg0: !torch.vtensor<[4,3,7,7],f32>, %arg1: !torch.vtensor<[4],f32>, %arg2: !torch.vtensor<[4],si16>) -> !torch.vtensor<[4,3,7,7],si16> attributes {torch.onnx_meta.ir_version = 10 : si64, torch.onnx_meta.opset_version = 19 : si64} {
  // CHECK: %[[DTYPE:.+]] = torch.constant.int 27
  // CHECK: %[[AXIS:.+]] = torch.constant.int 1
  // CHECK: %[[QUANT:.+]] = torch.aten.quantize_per_channel %arg0, %arg1, %arg2, %[[AXIS]], %[[DTYPE]]
  // CHECK: %[[REPR:.+]] = torch.aten.int_repr %[[QUANT]]
  %0 = torch.operator "onnx.QuantizeLinear"(%arg0, %arg1, %arg2) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[4,3,7,7],f32>, !torch.vtensor<[4],f32>, !torch.vtensor<[4],si16>) -> !torch.vtensor<[4,3,7,7],si16>
  return %0 : !torch.vtensor<[4,3,7,7],si16>
}

// -----
102
+
103
// Per-channel QuantizeLinear with si32 zero-point lowers to
// torch.aten.quantize_per_channel with dtype 14 (torch qint32) on axis 1,
// followed by int_repr to recover the raw si32 tensor.
// CHECK-LABEL: @test_quantizelinear_per_channel_si32
func.func @test_quantizelinear_per_channel_si32(%arg0: !torch.vtensor<[4,3,7,7],f32>, %arg1: !torch.vtensor<[4],f32>, %arg2: !torch.vtensor<[4],si32>) -> !torch.vtensor<[4,3,7,7],si32> attributes {torch.onnx_meta.ir_version = 10 : si64, torch.onnx_meta.opset_version = 19 : si64} {
  // CHECK: %[[DTYPE:.+]] = torch.constant.int 14
  // CHECK: %[[AXIS:.+]] = torch.constant.int 1
  // CHECK: %[[QUANT:.+]] = torch.aten.quantize_per_channel %arg0, %arg1, %arg2, %[[AXIS]], %[[DTYPE]]
  // CHECK: %[[REPR:.+]] = torch.aten.int_repr %[[QUANT]]
  %0 = torch.operator "onnx.QuantizeLinear"(%arg0, %arg1, %arg2) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[4,3,7,7],f32>, !torch.vtensor<[4],f32>, !torch.vtensor<[4],si32>) -> !torch.vtensor<[4,3,7,7],si32>
  return %0 : !torch.vtensor<[4,3,7,7],si32>
}

// -----
114
+
67
115
// CHECK-LABEL: @test_qlinearconv_nobias
68
116
func.func @test_qlinearconv_nobias (%arg0: !torch.vtensor <[1 ,1 ,7 ,7 ],ui8 >, %arg1: !torch.vtensor <[],f32 >, %arg2: !torch.vtensor <[],ui8 >, %arg3: !torch.vtensor <[1 ,1 ,1 ,1 ],ui8 >, %arg4: !torch.vtensor <[1 ],f32 >, %arg5: !torch.vtensor <[1 ],ui8 >, %arg6: !torch.vtensor <[],f32 >, %arg7: !torch.vtensor <[],ui8 >) -> !torch.vtensor <[1 ,1 ,7 ,7 ],ui8 > attributes {torch.onnx_meta.ir_version = 5 : si64 , torch.onnx_meta.opset_version = 10 : si64 , torch.onnx_meta.producer_name = " backend-test" , torch.onnx_meta.producer_version = " " } {
69
117
%0 = torch.operator " onnx.QLinearConv" (%arg0 , %arg1 , %arg2 , %arg3 , %arg4 , %arg5 , %arg6 , %arg7 ) : (!torch.vtensor <[1 ,1 ,7 ,7 ],ui8 >, !torch.vtensor <[],f32 >, !torch.vtensor <[],ui8 >, !torch.vtensor <[1 ,1 ,1 ,1 ],ui8 >, !torch.vtensor <[1 ],f32 >, !torch.vtensor <[1 ],ui8 >, !torch.vtensor <[],f32 >, !torch.vtensor <[],ui8 >) -> !torch.vtensor <[1 ,1 ,7 ,7 ],ui8 >
0 commit comments