from __future__ import annotations

import torch

import helion
import helion.language as hl

| 8 | +""" |
| 9 | + NOTE: layer_norm_fwd_ideal does not work! I am keeping this around as a reference |
| 10 | + to what I believed should have worked in Helion when I first began without debugging. |
| 11 | +
|
| 12 | + The user experience should be pushed this direction |
| 13 | +""" |
@helion.kernel(static_shapes=True)
def layer_norm_fwd_ideal(
    x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float = 1e-5
) -> torch.Tensor:
    """
    Layer normalization forward pass.

    Args:
        x: Input tensor of shape [batch_size, hidden_size]
        weight: Scale parameter of shape [hidden_size]
        bias: Bias parameter of shape [hidden_size]
        eps: Epsilon for numerical stability

    Returns:
        Normalized tensor of shape [batch_size, hidden_size]
    """
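    # Reference formula (standard layer norm, matching the docstring above):
    #   y = (x - mean(x)) / sqrt(var(x) + eps) * weight + bias,
    # with mean/var reduced over the hidden dimension of each row.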
    m = x.size(0)
    out = torch.empty_like(x)

    for tile_b in hl.tile(m):
        row = x[tile_b]
        # torch.var_mean returns (var, mean); reduce per row over the hidden dim,
        # with correction=0 (population variance) to match F.layer_norm.
        var, mean = torch.var_mean(row, dim=-1, keepdim=True, correction=0)

        layer_norm_out = (row - mean) / torch.sqrt(var + eps)
        layer_norm_out = layer_norm_out * weight + bias
        out[tile_b, :] = layer_norm_out

    return out

@helion.kernel(static_shapes=True, use_default_config=True)
def layer_norm_fwd(
    x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor
) -> torch.Tensor:
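    """
    Layer normalization forward pass (working Helion kernel; fp16 output).

    Statistics are computed per row over the last dimension; weight and bias are
    upcast to float32 inside the loop (see the cast note below). Compared against
    torch.nn.functional.layer_norm with eps=1e-5 in check().
    """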
    m, n = x.size()
    assert weight.size(0) == n, f"weight size mismatch {weight.size(0)} != {n}"
    assert bias.size(0) == n, f"bias size mismatch {bias.size(0)} != {n}"
    out = torch.empty([m, n], dtype=torch.float16, device=x.device)

    eps = 1e-5

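    # hl.tile(m) tiles the batch dimension; each loop iteration handles a block of rows.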
    for tile_m in hl.tile(m):
        # acc = x[tile_m, :].to(torch.float32) works! We should not have to do this cast
        acc = x[tile_m, :]

        var, mean = torch.var_mean(acc, dim=-1, keepdim=True, correction=0)

        normalized = (acc - mean) * torch.rsqrt(var + eps)
        acc = normalized * weight[:].to(torch.float32) + bias[:].to(torch.float32)

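        # NOTE: acc is float32 at this point (promoted by the float32 weight/bias);
        # storing into the float16 `out` casts it back down to fp16.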
        out[tile_m, :] = acc
    return out


def check(batch_size: int, hidden_size: int) -> None:
    from triton.testing import do_bench

    # Create random input tensors
    x = torch.randn([batch_size, hidden_size], device="cuda", dtype=torch.float16)
    weight = torch.randn([hidden_size], device="cuda", dtype=torch.float16)
    bias = torch.randn([hidden_size], device="cuda", dtype=torch.float16)

    # Run Helion kernel
    result = layer_norm_fwd(x, weight, bias)

    # Run PyTorch layer norm for comparison
    torch_result = torch.nn.functional.layer_norm(
        x, [hidden_size], weight, bias, eps=1e-5
    )

    # Check correctness
    torch.testing.assert_close(result, torch_result, rtol=1e-2, atol=1e-1)

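    # triton.testing.do_bench reports timings in milliseconds.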
    # Benchmark Helion implementation
    helion_ms = do_bench(lambda: layer_norm_fwd(x, weight, bias))

    # Benchmark PyTorch implementation
    torch_ms = do_bench(lambda: torch.nn.functional.layer_norm(
        x, [hidden_size], weight, bias, eps=1e-5
    ))

    print(
        f"Helion time: {helion_ms:.4f}ms, torch time: {torch_ms:.4f}ms, speedup: {torch_ms / helion_ms:.2f}x"
    )


def main() -> None:
    # Test with different sizes
    print("Testing batch_size=128, hidden_size=768")
    check(128, 768)

    print("\nTesting batch_size=32, hidden_size=1024")
    check(32, 1024)


if __name__ == "__main__":
    main()