Commit 66eef31

Revert "[fx] change from #users to num_users in graph printout (pytorch#101140)"
This reverts commit e568c5a. Reverted pytorch#101140 on behalf of https://github.com/jeanschmidt because there are internal changes to this commit that are preventing it from landing; the revert unblocks the diff train ([comment](pytorch#101140 (comment))).
1 parent 616208b commit 66eef31

8 files changed: 30 additions, 30 deletions


docs/source/fx.rst

Lines changed: 3 additions & 3 deletions

@@ -633,9 +633,9 @@ examine our traced module:
 # This print-out returns:
 """
 graph():
-    %x : [num_users=1] = placeholder[target=x]
-    %y : [num_users=1] = placeholder[target=y]
-    %add : [num_users=1] = call_function[target=operator.add](args = (%x, %y), kwargs = {})
+    %x : [#users=1] = placeholder[target=x]
+    %y : [#users=1] = placeholder[target=y]
+    %add : [#users=1] = call_function[target=operator.add](args = (%x, %y), kwargs = {})
     return add
 """
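
For context, a minimal sketch (not part of this commit) of the kind of two-input module whose traced graph produces a printout like the one above; the module name is an illustrative assumption:

import torch
import torch.fx

class AddModule(torch.nn.Module):
    def forward(self, x, y):
        # Tracing records x and y as placeholder nodes and the addition as a
        # call_function node targeting operator.add.
        return x + y

traced = torch.fx.symbolic_trace(AddModule())
print(traced.graph)  # node annotations read [#users=N] after this revert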

test/quantization/pt2e/test_quantize_pt2e.py

Lines changed: 5 additions & 5 deletions

@@ -396,11 +396,11 @@ def test_qnnpack_quantizer_conv_linear(self):
         """
         This test fails because linear decompositon changes due to the presence of
         permute node. In the below linear 1 is decomposed as
-        %t_default : [num_users=1] = call_function[target=torch.ops.aten.t.default](args = (%_param_constant2,), kwargs = {})
-        %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_default,), kwargs = {memory_format: torch.contiguous_format}) # noqa: B950
-        %_unsafe_view_default : [num_users=1] = call_function[target=torch.ops.aten._unsafe_view.default](args = (%clone_default, [8, 16]), kwargs = {}) # noqa: B950
-        %mm_default : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%_unsafe_view_default, %t_default), kwargs = {}) # noqa: B950
-        %view_default : [num_users=1] = call_function[target=torch.ops.aten.view.default](args = (%mm_default, [2, 2, 2, 8]), kwargs = {}) # noqa: B950
+        %t_default : [#users=1] = call_function[target=torch.ops.aten.t.default](args = (%_param_constant2,), kwargs = {})
+        %clone_default : [#users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_default,), kwargs = {memory_format: torch.contiguous_format}) # noqa: B950
+        %_unsafe_view_default : [#users=1] = call_function[target=torch.ops.aten._unsafe_view.default](args = (%clone_default, [8, 16]), kwargs = {}) # noqa: B950
+        %mm_default : [#users=1] = call_function[target=torch.ops.aten.mm.default](args = (%_unsafe_view_default, %t_default), kwargs = {}) # noqa: B950
+        %view_default : [#users=1] = call_function[target=torch.ops.aten.view.default](args = (%mm_default, [2, 2, 2, 8]), kwargs = {}) # noqa: B950

         Note the presence of cline and unsafe_view. This is due to permute
         """

test/test_fx.py

Lines changed: 1 addition & 1 deletion

@@ -1273,7 +1273,7 @@ def forward(self, x):
         traced = symbolic_trace(st)
         traced.graph.lint()
         stringed = str(traced.graph)
-        for s in ['args', 'kwargs', 'num_users']:
+        for s in ['args', 'kwargs', '#users']:
             assert s in stringed

     def test_custom_proxy_type(self):

torch/fx/__init__.py

Lines changed: 5 additions & 5 deletions

@@ -27,11 +27,11 @@ def forward(self, x):
     print(symbolic_traced.graph)
     """
     graph():
-        %x : [num_users=1] = placeholder[target=x]
-        %param : [num_users=1] = get_attr[target=param]
-        %add : [num_users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
-        %linear : [num_users=1] = call_module[target=linear](args = (%add,), kwargs = {})
-        %clamp : [num_users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
+        %x : [#users=1] = placeholder[target=x]
+        %param : [#users=1] = get_attr[target=param]
+        %add : [#users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
+        %linear : [#users=1] = call_module[target=linear](args = (%add,), kwargs = {})
+        %clamp : [#users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
         return clamp
     """
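
A sketch of the kind of module behind that docstring printout (parameter and layer sizes here are assumptions), showing where the placeholder, get_attr, call_function, call_module, and call_method nodes come from:

import torch
import torch.fx

class MyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.param = torch.nn.Parameter(torch.rand(3, 4))  # -> get_attr node
        self.linear = torch.nn.Linear(4, 5)                 # -> call_module node

    def forward(self, x):
        # x + self.param -> call_function[operator.add]; .clamp -> call_method
        return self.linear(x + self.param).clamp(min=0.0, max=1.0)

symbolic_traced = torch.fx.symbolic_trace(MyModule())
print(symbolic_traced.graph)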

torch/fx/experimental/const_fold.py

Lines changed: 4 additions & 4 deletions

@@ -229,13 +229,13 @@ def mod_partition(node: torch.fx.Node):
     # because we are fetching attributes directly from the root module, instead of
     # fetching them from const_gm. Example: The const_gm must have some format like:
     # graph():
-    #     %inp : [num_users=1] = placeholder[target=const_inp]
-    #     %add : [num_users=1] = call_function[target=operator.add](args = (%inp, %inp), kwargs = {})
+    #     %inp : [#users=1] = placeholder[target=const_inp]
+    #     %add : [#users=1] = call_function[target=operator.add](args = (%inp, %inp), kwargs = {})
     #     return add
     # We replace that with the following, which does not have any placeholders:
     # graph():
-    #     %inp_1 : [num_users=1] = get_attr[target=const_inp]
-    #     %add : [num_users=1] = call_function[target=operator.add](args = (%inp_1, %inp_1), kwargs = {})
+    #     %inp_1 : [#users=1] = get_attr[target=const_inp]
+    #     %add : [#users=1] = call_function[target=operator.add](args = (%inp_1, %inp_1), kwargs = {})
     #     return add
     root_const_gm = torch.fx.GraphModule(split, const_gm.graph)
     for node in root_const_gm.graph.nodes:
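
A hedged sketch (not the const_fold implementation itself) of the placeholder-to-get_attr rewrite the comment describes, using only public torch.fx Graph APIs; the helper name is hypothetical and `attr_name` must already exist on the module:

import torch
import torch.fx

def placeholder_to_get_attr(gm: torch.fx.GraphModule, attr_name: str) -> None:
    # Replace every placeholder with a get_attr node fetching `attr_name`,
    # mirroring the const_inp -> get_attr[target=const_inp] change shown above.
    for node in list(gm.graph.nodes):
        if node.op == "placeholder":
            with gm.graph.inserting_before(node):
                new_node = gm.graph.get_attr(attr_name)
            node.replace_all_uses_with(new_node)
            gm.graph.erase_node(node)
    gm.recompile()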

torch/fx/graph.py

Lines changed: 6 additions & 6 deletions

@@ -700,12 +700,12 @@ def forward(self, x):
     .. code-block:: text

         graph(x):
-            %linear_weight : [num_users=1] = self.linear.weight
-            %add_1 : [num_users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {})
-            %linear_1 : [num_users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
-            %relu_1 : [num_users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {})
-            %sum_1 : [num_users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1})
-            %topk_1 : [num_users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {})
+            %linear_weight : [#users=1] = self.linear.weight
+            %add_1 : [#users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {})
+            %linear_1 : [#users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
+            %relu_1 : [#users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {})
+            %sum_1 : [#users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1})
+            %topk_1 : [#users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {})
             return topk_1

     For the semantics of operations represented in the ``Graph``, please see :class:`Node`.
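
The same information in that printout is also available programmatically; a small sketch (the module and its layer sizes are assumptions, and `print_tabular` needs the optional `tabulate` package):

import torch
import torch.fx

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.topk(torch.sum(self.linear(x + self.linear.weight).relu(), dim=-1), 3)

gm = torch.fx.symbolic_trace(M())
print(gm.graph)           # textual form as in the docstring, with [#users=N] annotations
gm.graph.print_tabular()  # tabular view of opcode, name, target, args, kwargs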

torch/fx/node.py

Lines changed: 3 additions & 3 deletions

@@ -472,18 +472,18 @@ def format_node(self,
                 return None
             maybe_typename = f'{_type_repr(self.type)} ' if self.type else ''
             default_val = '(default=' + str(self.args[0]) + ')' if self.args else ''
-            return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = {self.op}[target={self.target}]{default_val}'
+            return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self.target}]{default_val}'
         elif self.op == 'get_attr':
             maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
-            return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
+            return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \
                    f'{self.op}[target={self._pretty_print_target(self.target)}]'
         elif self.op == 'output':
             if self.type and maybe_return_typename:
                 maybe_return_typename[0] = f' -> {_type_repr(self.type)}'
             return f'return {self.args[0]}'
         else:
             maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
-            return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
+            return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \
                    f'{self.op}[target={self._pretty_print_target(self.target)}](' \
                    f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'
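
Node.format_node is what produces those per-node lines, and the count comes from len(node.users); a small sketch (the module here is an assumption for illustration):

import torch
import torch.fx

class TwoUsers(torch.nn.Module):
    def forward(self, x):
        y = x.relu()
        # `y` is consumed by both the sum node and the add node, so its node has
        # two users and prints with [#users=2].
        return y + y.sum()

gm = torch.fx.symbolic_trace(TwoUsers())
for node in gm.graph.nodes:
    print(node.format_node(), "| users:", len(node.users))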

torch/fx/passes/reinplace.py

Lines changed: 3 additions & 3 deletions

@@ -81,9 +81,9 @@ def run_node(self, node: Node):
     # For multi-output views, we want to map each output view to the base,
     # but this mapping involves two separate nodes in FX IR.
     # e.g. "a, b = x_1.split(...)" becomes:
-    # %split_tensor : [num_users=2] = call_function[target=torch.ops.aten.split.Tensor](args = (%x_1, 2), kwargs = {})
-    # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 0), kwargs = {})
-    # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 1), kwargs = {})
+    # %split_tensor : [#users=2] = call_function[target=torch.ops.aten.split.Tensor](args = (%x_1, 2), kwargs = {})
+    # %getitem : [#users=1] = call_function[target=operator.getitem](args = (%split_tensor, 0), kwargs = {})
+    # %getitem_1 : [#users=1] = call_function[target=operator.getitem](args = (%split_tensor, 1), kwargs = {})
     # And we'd like to set:
     # getitem1.meta['view_of'] = x_1
     elif node.target is _operator.getitem:
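
The comment above describes an ATen-level graph; as a simpler illustration (an assumption, not taken from this pass), the same multi-output pattern under symbolic_trace produces one split node with [#users=2] and two operator.getitem nodes with [#users=1] each:

import torch
import torch.fx

class SplitModule(torch.nn.Module):
    def forward(self, x):
        parts = x.split(2)                   # one node with two downstream users
        return parts[0] + 1, parts[1] + 1    # two operator.getitem nodes

gm = torch.fx.symbolic_trace(SplitModule())
print(gm.graph)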
