Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions tests/test_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -3475,6 +3475,15 @@ def func(x):
return y1, y2
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})

@check_opset_min_version(11, "Unique")
def test_bincount(self):
    """tf.math.bincount should convert; its lowering needs Unique (opset 11+)."""
    data = np.array([5, 2, 3, 1, 3, 2, 7, 5, 9, 10], dtype=np.int32)

    def func(x):
        counts = tf.math.bincount(x)
        return tf.identity(counts, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})

@check_opset_min_version(11, "ScatterND")
def test_sparse_to_dense(self):
i_val = np.array([[0, 0, 0], [0, 0, 2], [0, 1, 3], [1, 2, 2], [1, 2, 3]], dtype=np.int64)
Expand Down
39 changes: 39 additions & 0 deletions tf2onnx/onnx_opset/tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -1841,6 +1841,45 @@ def version_11(cls, ctx, node, **kwargs):
ctx.copy_shape(new_node.output[2], cast_node.output[0])


@tf_op("Bincount")
class Bincount:
    """Lowers TF Bincount to ONNX via Unique + Compress + ScatterElements."""

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # arr, size are int32
        arr_inp, size_inp, weights_inp = node.input

        # ONNX Unique/ScatterElements index math is done in int64.
        arr_int64 = ctx.make_node("Cast", [arr_inp], attr={'to': TensorProto.INT64}).output[0]
        size_int64 = ctx.make_node("Cast", [size_inp], attr={'to': TensorProto.INT64}).output[0]

        # Only the unweighted form is handled: the weights input must be an
        # empty tensor. Its dtype still dictates the result dtype.
        weights_shape = ctx.get_shape(weights_inp)
        res_dtype = ctx.get_dtype(weights_inp)
        utils.make_sure(weights_shape is not None and 0 in weights_shape,
                        "Non-empty weights not yet supported for bincount")

        # Unique with sorted=1 yields the distinct values and how often each occurs.
        unique_node = ctx.make_node("Unique", [arr_int64], attr={'sorted': 1}, output_count=4,
                                    op_name_scope=node.name)
        values = unique_node.output[0]
        counts = unique_node.output[3]

        # Keep only values v with 0 <= v < size (TF drops out-of-range entries).
        neg_one_const = ctx.make_const(utils.make_name("neg_one_const"), np.array(-1, np.int64)).output[0]
        is_non_negative = ctx.make_node("Greater", [values, neg_one_const]).output[0]
        is_below_size = ctx.make_node("Less", [values, size_int64]).output[0]
        keep_mask = ctx.make_node("And", [is_non_negative, is_below_size]).output[0]

        kept_values = ctx.make_node("Compress", [values, keep_mask], attr={'axis': 0}).output[0]
        kept_counts = ctx.make_node("Compress", [counts, keep_mask], attr={'axis': 0}).output[0]

        # Scatter the per-value counts into a zero vector of length `size`.
        output_shape = ctx.make_node("Unsqueeze", [size_int64], attr={'axes': [0]}).output[0]
        zero_tensor = helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[0])
        zeros = ctx.make_node("ConstantOfShape", [output_shape], attr={'value': zero_tensor}).output[0]
        scattered = ctx.make_node("ScatterElements", [zeros, kept_values, kept_counts],
                                  attr={'axis': 0}).output[0]

        # Cast back to the dtype TF expects (taken from the weights input).
        final_output = scattered
        if res_dtype != TensorProto.INT64:
            final_output = ctx.make_node("Cast", [scattered], attr={'to': res_dtype}).output[0]

        # Rewire all consumers to the new subgraph, then drop the TF node.
        ctx.replace_all_inputs(node.output[0], final_output)
        ctx.remove_node(node.name)


@tf_op("SparseToDense")
class SparseToDense:
@classmethod
Expand Down