2 changes: 2 additions & 0 deletions helion/_testing.py
@@ -209,6 +209,8 @@ def save(self) -> None:
f"--- assertExpectedJournal({name})\n{expected}\n\n"
for expected in expected_values
)
+ # Remove the last newline to play nicer with some people's editors
+ f.truncate(f.tell() - 1)
os.rename(tmp, self.filename)

@staticmethod
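
The added f.truncate(f.tell() - 1) drops the final byte of the journal, so each regenerated .expected file below now ends with a single newline instead of a blank line. Below is a minimal standalone sketch of the same write-then-truncate-then-rename pattern; the function name write_expected, the entries dict, and the temp-file path are hypothetical stand-ins rather than the actual helion API.

import os

def write_expected(path: str, entries: dict[str, str]) -> None:
    # Sketch only: mirrors the pattern in helion/_testing.py's save().
    tmp = path + ".tmp"  # hypothetical temp path; helion builds its own
    with open(tmp, "w") as f:
        f.write(
            "".join(
                f"--- assertExpectedJournal({name})\n{expected}\n\n"
                for name, expected in entries.items()
            )
        )
        # Drop the very last "\n" so the file ends with one newline,
        # which plays nicer with editors that trim or add a final newline.
        f.truncate(f.tell() - 1)
    os.rename(tmp, path)  # swap the finished file into place

The truncate works because the journal always ends with "\n\n" and the content is plain text, so removing one byte removes exactly one newline.
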
1 change: 0 additions & 1 deletion test/test_associative_scan.expected
@@ -1648,4 +1648,3 @@ def _test_cumsum_reverse_kernel_make_precompiler(x: torch.Tensor):
_RDIM_SIZE_1 = triton.next_power_of_2(x.size(1))
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_test_cumsum_reverse_kernel_kernel)(x, result, x.size(1), result.stride(1), x.stride(1), _RDIM_SIZE_1, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_atomic_add.expected
@@ -147,4 +147,3 @@ def _atomic_add_overlap_kernel_make_precompiler(x: torch.Tensor, y: torch.Tensor
_BLOCK_SIZE_0 = 32
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_atomic_add_overlap_kernel_kernel)(indices, y, x, _BLOCK_SIZE_0, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_broadcasting.expected
@@ -295,4 +295,3 @@ def _fn_make_precompiler(a, b):
_BLOCK_SIZE_1 = 16
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_fn_kernel)(a, b, out, a.size(0), a.size(1), a.stride(0), a.stride(1), b.stride(0), out.stride(0), out.stride(1), _BLOCK_SIZE_0, _BLOCK_SIZE_1, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_closures.expected
@@ -205,4 +205,3 @@ def _call_func_arg_on_host_make_precompiler(a, alloc):
_BLOCK_SIZE_0 = 512
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_call_func_arg_on_host_kernel)(a, out, a.size(0), a.stride(0), out.stride(0), _BLOCK_SIZE_0, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_constexpr.expected
@@ -116,4 +116,3 @@ def _fn_make_precompiler(x: torch.Tensor, s: hl.constexpr):
_BLOCK_SIZE_1 = 16
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_fn_kernel)(x, out, out.stride(0), out.stride(1), x.stride(0), b, _BLOCK_SIZE_0, _BLOCK_SIZE_1, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_control_flow.expected
@@ -214,4 +214,3 @@ def _fn_make_precompiler(x: torch.Tensor, y: torch.Tensor):
output = torch.zeros_like(x)
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_fn_kernel)(x, y, output, output.stride(0), x.stride(0), y.stride(0), num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_generate_ast.expected
@@ -548,4 +548,3 @@ def _torch_ops_pointwise_make_precompiler(x, y):
_BLOCK_SIZE_0 = 128
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_torch_ops_pointwise_kernel)(x, y, out, x.size(0), out.stride(0), x.stride(0), y.stride(0), _BLOCK_SIZE_0, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_indexing.expected
@@ -171,4 +171,3 @@ def _pairwise_add_make_precompiler(x: torch.Tensor):
_BLOCK_SIZE_0 = 32
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_pairwise_add_kernel)(out, x, out.size(0), out.stride(0), x.stride(0), _BLOCK_SIZE_0, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_reduce.expected
@@ -590,4 +590,3 @@ def _test_reduce_keep_dims_kernel_make_precompiler(x: torch.Tensor):
_RDIM_SIZE_1 = triton.next_power_of_2(x.size(1))
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_test_reduce_keep_dims_kernel_kernel)(x, result, x.size(0), x.size(1), result.stride(0), x.stride(0), x.stride(1), _BLOCK_SIZE_0, _RDIM_SIZE_1, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_specialize.expected
@@ -184,4 +184,3 @@ def _fn_make_precompiler(x: torch.Tensor):
_BLOCK_SIZE_0_1 = 32
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_fn_kernel)(x, out, x.size(0), x.size(1), out.stride(0), out.stride(1), x.stride(0), x.stride(1), scale, _BLOCK_SIZE_0_1, num_warps=4, num_stages=3)

1 change: 0 additions & 1 deletion test/test_type_propagation.expected
@@ -822,4 +822,3 @@ def root_graph_0():
out: "i32[s77]" = helion_language__tracing_ops__host_tensor('out')
store = helion_language_memory_ops_store(out, [block_size_0], convert_element_type, None); out = block_size_0 = convert_element_type = store = None
return None

1 change: 0 additions & 1 deletion test/test_views.expected
@@ -117,4 +117,3 @@ def _fn_make_precompiler(x: torch.Tensor, y: torch.Tensor):
_BLOCK_SIZE_1 = 32
from helion.runtime.precompile_shim import make_precompiler
return make_precompiler(_fn_kernel)(x, y, out, out.size(0), out.size(1), x.size(0), x.size(1), y.size(0), out.stride(0), out.stride(1), x.stride(0), x.stride(1), y.stride(0), y.stride(1), _BLOCK_SIZE_0, _BLOCK_SIZE_1, num_warps=4, num_stages=3)
