@@ -103,60 +103,60 @@ def aten_ops_batch_norm(
     )


-@tensorrt_converter(torch.ops.aten.convolution.default)
-def aten_ops_convolution(
-    network: TRTNetwork,
-    target: Target,
-    args: Tuple[Argument, ...],
-    kwargs: Dict[str, Argument],
-    name: str,
-) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    kwargs_new = {
-        "input": args[0],
-        "weight": args[1],
-        "bias": args[2],
-        "stride": args[3],
-        "padding": args[4],
-        "dilation": args[5],
-        "groups": args[8],
-    }
-    # we do not handle transposed.
-    if args[6] is True:
-        raise RuntimeError(f"Target {target} does not support `transposed=True` ")
-    # we do not handle output_padding.
-    if args[7] not in ([0], [0, 0], [0, 0, 0]):
-        raise RuntimeError(f"Target {target} has non-0 output_padding")
-
-    if len(kwargs_new["stride"]) == 1:
-        return convolution.convNd(
-            network,
-            target,
-            source_ir=SourceIR.ATEN,
-            name=name,
-            is_conv1d=True,
-            input_val=kwargs_new["input"],
-            weight=kwargs_new["weight"],
-            bias=kwargs_new["bias"],
-            stride=kwargs_new["stride"],
-            padding=kwargs_new["padding"],
-            dilation=kwargs_new["dilation"],
-            groups=kwargs_new["groups"],
-        )
-    else:
-        return convolution.convNd(
-            network,
-            target,
-            source_ir=SourceIR.ATEN,
-            name=name,
-            is_conv1d=False,
-            input_val=kwargs_new["input"],
-            weight=kwargs_new["weight"],
-            bias=kwargs_new["bias"],
-            stride=kwargs_new["stride"],
-            padding=kwargs_new["padding"],
-            dilation=kwargs_new["dilation"],
-            groups=kwargs_new["groups"],
-        )
+# @tensorrt_converter(torch.ops.aten.convolution.default)
+# def aten_ops_convolution(
+#     network: TRTNetwork,
+#     target: Target,
+#     args: Tuple[Argument, ...],
+#     kwargs: Dict[str, Argument],
+#     name: str,
+# ) -> Union[TRTTensor, Sequence[TRTTensor]]:
+#     kwargs_new = {
+#         "input": args[0],
+#         "weight": args[1],
+#         "bias": args[2],
+#         "stride": args[3],
+#         "padding": args[4],
+#         "dilation": args[5],
+#         "groups": args[8],
+#     }
+#     # we do not handle transposed.
+#     if args[6] is True:
+#         raise RuntimeError(f"Target {target} does not support `transposed=True` ")
+#     # we do not handle output_padding.
+#     if args[7] not in ([0], [0, 0], [0, 0, 0]):
+#         raise RuntimeError(f"Target {target} has non-0 output_padding")
+
+#     if len(kwargs_new["stride"]) == 1:
+#         return convolution.convNd(
+#             network,
+#             target,
+#             source_ir=SourceIR.ATEN,
+#             name=name,
+#             is_conv1d=True,
+#             input_val=kwargs_new["input"],
+#             weight=kwargs_new["weight"],
+#             bias=kwargs_new["bias"],
+#             stride=kwargs_new["stride"],
+#             padding=kwargs_new["padding"],
+#             dilation=kwargs_new["dilation"],
+#             groups=kwargs_new["groups"],
+#         )
+#     else:
+#         return convolution.convNd(
+#             network,
+#             target,
+#             source_ir=SourceIR.ATEN,
+#             name=name,
+#             is_conv1d=False,
+#             input_val=kwargs_new["input"],
+#             weight=kwargs_new["weight"],
+#             bias=kwargs_new["bias"],
+#             stride=kwargs_new["stride"],
+#             padding=kwargs_new["padding"],
+#             dilation=kwargs_new["dilation"],
+#             groups=kwargs_new["groups"],
+#         )


 @tensorrt_converter(torch.ops.aten.div.default)