[float8nocompile] Add alternate Triton kernels for FP8 conversion which use atomic_max-based algo instead of reduction-based algo #1455
Changes from 2 commits
@@ -7,9 +7,9 @@
"""
Triton kernels for scaling high precision tensors to float8.
"""
from enum import Enum

import torch

import triton
import triton.language as tl
@@ -31,8 +31,86 @@
}


class KernelAlgorithm(Enum):
    """Enum for FP8 conversion strategy."""

    # use atomic max to compute global amax between blocks
    ATOMIC_MAX = "atomic_max"

    # reduce shared buffer containing local block amaxes to find global amax
    REDUCTION = "reduction"


kernel_configs = [
    triton.Config({"BLOCK_SIZE": 128}, num_warps=1),
    triton.Config({"BLOCK_SIZE": 256}, num_warps=2),
    triton.Config({"BLOCK_SIZE": 512}, num_warps=4),
]


# --- atomic max version of kernel ---
@triton.autotune(configs=kernel_configs, key=["input_size"])
@triton.jit
def _block_amax_atomic(
    input_ptr,
    amax_ptr,
    num_elements,
    input_dtype: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    EPS: tl.constexpr,
):
    # compute local amax for each block
    block_id = tl.program_id(axis=0)
    block_start = block_id * BLOCK_SIZE
    block_offs = block_start + tl.arange(0, BLOCK_SIZE)
    block_mask = block_offs < num_elements
    vals = tl.load(input_ptr + block_offs, mask=block_mask).to(input_dtype)
    block_amax = tl.max(tl.abs(vals), axis=0)
    tl.atomic_max(amax_ptr, block_amax)


@triton.autotune(configs=kernel_configs, key=["input_size"])
@triton.jit
def _to_fp8_atomic(
    input_ptr,
    scale_out_ptr,
    amax_ptr,
    out_ptr,
    num_elements,
    fp8_dtype_min,
    fp8_dtype_max,
    input_dtype: tl.constexpr,
    output_dtype: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    EPS: tl.constexpr,
):
    # compute scale, must be fp32
    global_amax = tl.load(amax_ptr)
    scale = (fp8_dtype_max / tl.clamp(global_amax, min=EPS, max=float("inf"))).to(
        tl.float32
    )

    # only one program needs to store the scale
    block_id = tl.program_id(axis=0)
    if block_id == 0:
Reviewer: not sure why this is needed, maybe delete, since doing per-program custom logic increases complexity? If you want to expose the scale to be able to test it numerically, IMO having your atomics kernel output the scale is simpler.

Author: Since all N thread blocks / program ids will calculate the same scale value for the tensor, we only need one of them to store the scale output. I figured having all N programs write the same value to the same memory location would be redundant. I need the scale to use in the Float8Tensor constructor, but yeah, I agree it would be simpler to have the atomic amax kernel do the scale calculation right there rather than storing the amax and then using it to calculate the scale here. I will refactor.

Reviewer: That's a good reason, just put that in the comment. I think having a Triton program with if statements that branch by program ID is a pattern to be avoided (even if it works) because it's surprising / isn't really how people commonly write Triton kernels. IMO we can just refactor your atomic kernel code to also write out the scale to get to the same place with simpler code.

Author: Done. Good to know, thanks; refactored to remove this pattern and simplify the code, let me know what you think.
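One way the suggested refactor could look, as a minimal sketch using a hypothetical kernel name _fp8_scale_atomic (illustrative only, not necessarily the code this PR landed on): once the atomic-max pass has produced the global amax, a single-program kernel computes and stores the fp32 scale, so the conversion kernel only loads a precomputed scale and never branches on program ID.

import triton
import triton.language as tl


# Hypothetical single-program kernel (sketch, not from this PR): reads the
# global amax produced by the atomic-max pass and writes the fp32 scale.
@triton.jit
def _fp8_scale_atomic(
    amax_ptr,
    scale_out_ptr,
    fp8_dtype_max,
    EPS: tl.constexpr,
):
    # launched with a (1, 1, 1) grid: exactly one program computes and stores the scale
    global_amax = tl.load(amax_ptr)
    scale = (fp8_dtype_max / tl.clamp(global_amax, min=EPS, max=float("inf"))).to(
        tl.float32
    )
    tl.store(scale_out_ptr, scale)

The driver would then launch the atomic amax kernel, this scale kernel with a (1, 1, 1) grid, and finally the conversion kernel, which simply loads the precomputed scale.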
        scale_offs = tl.arange(0, 1)
        tl.store(scale_out_ptr + scale_offs, scale)

    # load block of input tensor
    block_start = block_id * BLOCK_SIZE
    block_offs = block_start + tl.arange(0, BLOCK_SIZE)
    mask = block_offs < num_elements
    vals = tl.load(input_ptr + block_offs, mask=mask).to(input_dtype)

    # perform conversion
    vals = vals * scale
    fp8_vals = tl.clamp(vals, min=fp8_dtype_min, max=fp8_dtype_max).to(output_dtype)
    tl.store(out_ptr + block_offs, fp8_vals, mask=mask)


# --- reduction version of kernel ---
@triton.jit
def _block_amax(
def _block_amax_reduction(
    input_ptr,
    block_amaxes_ptr,
    num_elements,
@@ -51,7 +129,7 @@ def _block_amax(

@triton.jit
def _fp8_scale(
def _fp8_scale_reduction(
    block_amaxes_ptr,
    scale_out_ptr,
    num_elements,

@@ -75,7 +153,7 @@ def _fp8_scale(

@triton.jit
def _to_fp8(
def _to_fp8_reduction(
    input_ptr,
    scale_ptr,
    out_ptr,
@@ -108,12 +186,10 @@ def triton_hp_tensor_to_float8_dynamic(
    fp8_dtype: torch.dtype,
    linear_mm_config: LinearMMConfig,
    gemm_input_role: GemmInputRole = GemmInputRole.INPUT,
    algo: KernelAlgorithm = KernelAlgorithm.ATOMIC_MAX,
) -> Float8Tensor:

    assert hp_tensor.is_contiguous(), "tensor must be contiguous"

    BLOCK_SIZE = 8  # TODO(danielvegamyhre): tune this for perf

    num_elements = hp_tensor.numel()
    orig_shape = hp_tensor.shape
    flattened_input = hp_tensor.flatten()
@@ -126,47 +202,80 @@ def triton_hp_tensor_to_float8_dynamic(

    # allocate memory for computed scale, local block maxes, and output fp8 tensor
    scale_out = torch.empty((1,), dtype=torch.float32, device=hp_tensor.device)
    block_amaxes = torch.zeros(
        (num_elements // BLOCK_SIZE,), dtype=torch.float32, device=hp_tensor.device
    )

    fp8_output = torch.empty_like(
        flattened_input, dtype=fp8_dtype, device=hp_tensor.device
    )

    # compute local amax for each block
    grid = lambda meta: (triton.cdiv(num_elements, meta["BLOCK_SIZE"]),)
    _block_amax[grid](
        flattened_input,
        block_amaxes,
        num_elements,
        input_dtype=tl_input_dtype,
        BLOCK_SIZE=BLOCK_SIZE,
        EPS=EPS,
    )

    # calculate global amax across all blocks and use it to compute scale
    _fp8_scale[(1, 1, 1)](
        block_amaxes,
        scale_out,
        num_elements,
        fp8_dtype_max,
        BLOCK_SIZE=BLOCK_SIZE,
        EPS=EPS,
    )
    if algo == KernelAlgorithm.ATOMIC_MAX:
        global_amax = torch.zeros((1,), dtype=torch.float32, device=hp_tensor.device)
        # compute global amax to be used for scaling
        _block_amax_atomic[grid](
            flattened_input,
            global_amax,
            num_elements,
            input_dtype=tl_input_dtype,
            EPS=EPS,
        )

    # perform conversion
    _to_fp8[grid](
        flattened_input,
        scale_out,
        fp8_output,
        num_elements,
        fp8_dtype_min,
        fp8_dtype_max,
        input_dtype=tl_input_dtype,
        output_dtype=tl_output_dtype,
        BLOCK_SIZE=BLOCK_SIZE,
        EPS=EPS,
    )
        torch.cuda.synchronize()
Reviewer: why is this needed? If helpful, I previously experimented with atomic max here (https://github.com/vkuzo/pytorch_scripts/tree/main/reduction_hack) and didn't need synchronization for correct numerics, although I didn't test the logic all the way up to the float8 conversion.

Author: The unit test for conversion was failing non-deterministically, and forcing synchronization here was the only solution that reliably fixed it. This isn't ideal, though, so I am hoping to find a better way.
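For context on the numerics being discussed, here is a hedged eager-mode reference (a hypothetical helper, not the PR's actual unit test) of what tensorwise dynamic float8 scaling computes: global amax, an fp32 scale clamped away from zero, then a clamped cast.

import torch

# Hypothetical eager-mode reference (illustration only; the PR's real test is
# not shown in this diff). Assumes float8_e4m3fn and an eps of 1e-12.
def eager_scale_to_fp8(hp_tensor: torch.Tensor, eps: float = 1e-12):
    fp8_dtype = torch.float8_e4m3fn
    fp8_max = torch.finfo(fp8_dtype).max
    amax = hp_tensor.abs().max().float()              # global amax
    scale = fp8_max / torch.clamp(amax, min=eps)      # fp32 scale
    fp8_vals = (hp_tensor.float() * scale).clamp(-fp8_max, fp8_max).to(fp8_dtype)
    return fp8_vals, scale

A non-deterministic mismatch against such a reference would be consistent with the conversion kernel occasionally reading the amax buffer before all atomic updates have landed, which is what the synchronization above works around.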
        # perform conversion and store scale for use in Float8Tensor
        _to_fp8_atomic[grid](
            flattened_input,
            scale_out,
            global_amax,
            fp8_output,
            num_elements,
            fp8_dtype_min,
            fp8_dtype_max,
            input_dtype=tl_input_dtype,
            output_dtype=tl_output_dtype,
            EPS=EPS,
        )
    elif algo == KernelAlgorithm.REDUCTION:
        max_block_size = 512
        BLOCK_SIZE = min(max_block_size, num_elements)
        block_amaxes = torch.zeros(
            (num_elements // BLOCK_SIZE,), dtype=torch.float32, device=hp_tensor.device
        )
        # compute local amax for each block
        _block_amax_reduction[grid](
            flattened_input,
            block_amaxes,
            num_elements,
            input_dtype=tl_input_dtype,
            BLOCK_SIZE=BLOCK_SIZE,
            EPS=EPS,
        )

        # calculate global amax across all blocks and use it to compute scale
        _fp8_scale_reduction[(1, 1, 1)](
            block_amaxes,
            scale_out,
            num_elements,
            fp8_dtype_max,
            BLOCK_SIZE=BLOCK_SIZE,
            EPS=EPS,
        )

        # perform conversion
        _to_fp8_reduction[grid](
            flattened_input,
            scale_out,
            fp8_output,
            num_elements,
            fp8_dtype_min,
            fp8_dtype_max,
            input_dtype=tl_input_dtype,
            output_dtype=tl_output_dtype,
            BLOCK_SIZE=BLOCK_SIZE,
            EPS=EPS,
        )
    else:
        raise ValueError(f"Unsupported kernel algorithm: {algo}")

    return Float8Tensor(
        fp8_output.reshape(orig_shape),
Reviewer: why is there an axis=0 here?

Author: It isn't necessary, I've removed it.
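For reference, a hedged usage sketch of the new algo argument, based on the function signature in this diff; the import paths and the LinearMMConfig() construction are assumptions for illustration and are not shown in the PR.

import torch

# Import paths below are assumptions; adjust to the actual module locations in torchao.
from torchao.float8.float8_tensor import LinearMMConfig
from torchao.prototype.float8nocompile.kernels.fp8_dynamic_tensorwise import (
    KernelAlgorithm,
    triton_hp_tensor_to_float8_dynamic,
)

hp_tensor = torch.randn(4096, 4096, dtype=torch.bfloat16, device="cuda")

# convert with the atomic_max-based kernels (the default), or pass
# KernelAlgorithm.REDUCTION to use the reduction-based kernels instead
fp8_tensor = triton_hp_tensor_to_float8_dynamic(
    hp_tensor,
    torch.float8_e4m3fn,
    LinearMMConfig(),  # default mm config; exact construction is an assumption
    algo=KernelAlgorithm.ATOMIC_MAX,
)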