@@ -10,9 +10,10 @@
 # @manual=//deeplearning/trt/python:py_tensorrt
 import tensorrt as trt
 import torch
-import torch_tensorrt.fx.tracer.acc_tracer.acc_utils as acc_utils
 from torch.fx.immutable_collections import immutable_list
 from torch.fx.node import Argument, Target
+
+import torch_tensorrt.fx.tracer.acc_tracer.acc_utils as acc_utils
 from torch_tensorrt.fx.converters import acc_ops_converters
 from torch_tensorrt.fx.converters.impl import activation, convolution
 
@@ -103,62 +104,6 @@ def aten_ops_batch_norm(
     )
 
 
-# @tensorrt_converter(torch.ops.aten.convolution.default)
-# def aten_ops_convolution(
-#     network: TRTNetwork,
-#     target: Target,
-#     args: Tuple[Argument, ...],
-#     kwargs: Dict[str, Argument],
-#     name: str,
-# ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-#     kwargs_new = {
-#         "input": args[0],
-#         "weight": args[1],
-#         "bias": args[2],
-#         "stride": args[3],
-#         "padding": args[4],
-#         "dilation": args[5],
-#         "groups": args[8],
-#     }
-#     # we do not handle transposed.
-#     if args[6] is True:
-#         raise RuntimeError(f"Target {target} does not support `transposed=True` ")
-#     # we do not handle output_padding.
-#     if args[7] not in ([0], [0, 0], [0, 0, 0]):
-#         raise RuntimeError(f"Target {target} has non-0 output_padding")
-
-#     if len(kwargs_new["stride"]) == 1:
-#         return convolution.convNd(
-#             network,
-#             target,
-#             source_ir=SourceIR.ATEN,
-#             name=name,
-#             is_conv1d=True,
-#             input_val=kwargs_new["input"],
-#             weight=kwargs_new["weight"],
-#             bias=kwargs_new["bias"],
-#             stride=kwargs_new["stride"],
-#             padding=kwargs_new["padding"],
-#             dilation=kwargs_new["dilation"],
-#             groups=kwargs_new["groups"],
-#         )
-#     else:
-#         return convolution.convNd(
-#             network,
-#             target,
-#             source_ir=SourceIR.ATEN,
-#             name=name,
-#             is_conv1d=False,
-#             input_val=kwargs_new["input"],
-#             weight=kwargs_new["weight"],
-#             bias=kwargs_new["bias"],
-#             stride=kwargs_new["stride"],
-#             padding=kwargs_new["padding"],
-#             dilation=kwargs_new["dilation"],
-#             groups=kwargs_new["groups"],
-#         )
-
-
 @tensorrt_converter(torch.ops.aten.div.default)
 @tensorrt_converter(torch.ops.aten.div.Tensor_mode)
 @tensorrt_converter(torch.ops.aten.div.Tensor)
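Note on the commented-out converter deleted above: it read the positional arguments of `torch.ops.aten.convolution.default` by index, following the ATen schema `aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups)` — which is why `transposed` was `args[6]`, `output_padding` was `args[7]`, and `groups` was `args[8]`. A minimal runnable sketch of a call in that argument order (illustrative shapes, not part of the commit):

    import torch

    # aten::convolution(input, weight, bias, stride, padding, dilation,
    #                   transposed, output_padding, groups)
    x = torch.randn(1, 3, 8, 8)   # args[0]: input (NCHW)
    w = torch.randn(4, 3, 3, 3)   # args[1]: weight (out_ch, in_ch, kH, kW)
    out = torch.ops.aten.convolution.default(
        x, w, None,               # args[0..2]: input, weight, bias
        [1, 1], [0, 0], [1, 1],   # args[3..5]: stride, padding, dilation
        False, [0, 0],            # args[6..7]: transposed, output_padding
        1,                        # args[8]: groups
    )
    assert out.shape == (1, 4, 6, 6)  # 3x3 kernel, no padding: 8 -> 6

The deleted converter rejected `transposed=True` and nonzero `output_padding`, then dispatched to `convolution.convNd`, using the length of `stride` only to decide between the 1D and ND paths.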