diff --git a/benchmarks/float8/profile_linear_float8.py b/benchmarks/float8/profile_linear_float8.py index 6afefa009..a42d4467c 100644 --- a/benchmarks/float8/profile_linear_float8.py +++ b/benchmarks/float8/profile_linear_float8.py @@ -27,6 +27,8 @@ Float8LinearConfig, ScalingType, ScalingGranularity, + _Float8LinearRecipeName, + _recipe_name_to_linear_config, ) from torchao.float8.float8_linear_utils import ( convert_to_float8_training, @@ -258,6 +260,7 @@ def main( scaling_type_weight: str = "dynamic", scaling_type_grad_output: str = "dynamic", scaling_granularity: str = "tensorwise", + recipe_name: Optional[str] = None, model_type: str = "linear", dtype_filter: str = "both", add_inductor_metadata_to_trace: bool = True, @@ -271,45 +274,51 @@ def main( scaling_type_grad_output = ScalingType(scaling_type_grad_output) scaling_granularity = ScalingGranularity(scaling_granularity) - if scaling_type_input is ScalingType.STATIC: - cast_config_input=CastConfig( - scaling_type=scaling_type_input, - static_scale=torch.tensor([1.0], device="cuda"), - scaling_granularity=scaling_granularity, - ) - else: - cast_config_input=CastConfig( - scaling_type=scaling_type_input, - scaling_granularity=scaling_granularity, - ) - if scaling_type_weight is ScalingType.STATIC: - cast_config_weight=CastConfig( - scaling_type=scaling_type_weight, - static_scale=torch.tensor([1.0], device="cuda"), - scaling_granularity=scaling_granularity, - ) - else: - cast_config_weight=CastConfig( - scaling_type=scaling_type_weight, - scaling_granularity=scaling_granularity, - ) - if scaling_type_grad_output is ScalingType.STATIC: - cast_config_grad_output=CastConfig( - scaling_type=scaling_type_grad_output, - static_scale=torch.tensor([1.0], device="cuda"), - scaling_granularity=scaling_granularity, - ) - else: - cast_config_grad_output=CastConfig( - scaling_type=scaling_type_grad_output, - scaling_granularity=scaling_granularity, + if recipe_name is None: + + if scaling_type_input is ScalingType.STATIC: + cast_config_input=CastConfig( + scaling_type=scaling_type_input, + static_scale=torch.tensor([1.0], device="cuda"), + scaling_granularity=scaling_granularity, + ) + else: + cast_config_input=CastConfig( + scaling_type=scaling_type_input, + scaling_granularity=scaling_granularity, + ) + if scaling_type_weight is ScalingType.STATIC: + cast_config_weight=CastConfig( + scaling_type=scaling_type_weight, + static_scale=torch.tensor([1.0], device="cuda"), + scaling_granularity=scaling_granularity, + ) + else: + cast_config_weight=CastConfig( + scaling_type=scaling_type_weight, + scaling_granularity=scaling_granularity, + ) + if scaling_type_grad_output is ScalingType.STATIC: + cast_config_grad_output=CastConfig( + scaling_type=scaling_type_grad_output, + static_scale=torch.tensor([1.0], device="cuda"), + scaling_granularity=scaling_granularity, + ) + else: + cast_config_grad_output=CastConfig( + scaling_type=scaling_type_grad_output, + scaling_granularity=scaling_granularity, + ) + + config = Float8LinearConfig( + cast_config_input=cast_config_input, + cast_config_weight=cast_config_weight, + cast_config_grad_output=cast_config_grad_output, ) - config = Float8LinearConfig( - cast_config_input=cast_config_input, - cast_config_weight=cast_config_weight, - cast_config_grad_output=cast_config_grad_output, - ) + elif recipe_name is not None: + recipe_name = _Float8LinearRecipeName(recipe_name) + config = _recipe_name_to_linear_config(recipe_name) scaling_repr = "_".join( [ diff --git a/test/float8/test_base.py b/test/float8/test_base.py 
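(Sketch, not part of the patch.) The benchmark change above wires a new `recipe_name` argument through to `_recipe_name_to_linear_config`; roughly, the snippet below is what that path does end to end. The toy module, the shapes, and importing the private recipe helpers directly from `torchao.float8.config` (where this patch defines them) are assumptions.

```python
import copy

import torch
import torch.nn as nn

# the private recipe helpers are defined in torchao/float8/config.py by this
# patch; importing them from that module (or from torchao.float8, as the
# benchmark does) is assumed to work
from torchao.float8.config import (
    _Float8LinearRecipeName,
    _recipe_name_to_linear_config,
)
from torchao.float8.float8_linear_utils import convert_to_float8_training

# parse the CLI string into the enum, then expand it into a full
# Float8LinearConfig, mirroring the new `recipe_name is not None` branch
recipe = _Float8LinearRecipeName("all_axiswise")
config = _recipe_name_to_linear_config(recipe)

# apply to a toy model; shapes are illustrative, and the axiswise recipes
# expect bf16 activations plus an SM90 GPU for the rowwise scaled-mm kernels
m_ref = nn.Sequential(nn.Linear(256, 512, bias=False)).to("cuda", torch.bfloat16)
m_fp8 = copy.deepcopy(m_ref)
convert_to_float8_training(m_fp8, config=config)
y = m_fp8(torch.randn(64, 256, device="cuda", dtype=torch.bfloat16))
y.sum().backward()
```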
index 0aab91f55..66d152f2d 100644 --- a/test/float8/test_base.py +++ b/test/float8/test_base.py @@ -8,6 +8,7 @@ import itertools import random import re +from typing import List, Tuple import unittest import warnings @@ -27,6 +28,8 @@ Float8LinearConfig, ScalingGranularity, ScalingType, + _Float8LinearRecipeName, + _recipe_name_to_linear_config, ) from torchao.float8.float8_linear import Float8Linear from torchao.float8.float8_linear_utils import ( @@ -35,7 +38,10 @@ sync_float8_amax_and_scale_history, ) from torchao.float8.float8_python_api import addmm_float8_unwrapped -from torchao.float8.float8_scaling_utils import hp_tensor_to_float8_dynamic +from torchao.float8.float8_scaling_utils import ( + hp_tensor_to_float8_dynamic, + get_maybe_axiswise_dim, +) from torchao.float8.float8_tensor import ( Float8Tensor, GemmInputRole, @@ -51,6 +57,7 @@ FP8_TYPES, tensor_to_scale, ) +from torchao.testing.float8.test_utils import get_test_float8_linear_config random.seed(0) torch.manual_seed(0) @@ -59,6 +66,8 @@ is_cuda_8_9 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 9) is_cuda_9_0 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0) + + def bitwise_identical(a: Float8Tensor, b: Float8Tensor) -> bool: assert torch.all(a._scale == b._scale).item(), "scales are not identical" assert torch.all(a._data == b._data).item(), "data is not identical" @@ -205,9 +214,17 @@ def test_axiswise_reshape(self): a_fp8_d2_r2 = a_fp8_d2.reshape(3, -1) @pytest.mark.parametrize("a_shape", [(16, 32), (2, 16, 32), (1, 2, 16, 32)]) + @pytest.mark.parametrize( + "a_granularity,b_granularity", + [ + (ScalingGranularity.AXISWISE, ScalingGranularity.AXISWISE), + (ScalingGranularity.AXISWISE, ScalingGranularity.TENSORWISE), + (ScalingGranularity.TENSORWISE, ScalingGranularity.AXISWISE), + ] + ) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") @unittest.skipIf(not is_cuda_9_0, "Requires CUDA capability >= 9.0") - def test_axiswise_gemm(self, a_shape): + def test_axiswise_gemm(self, a_shape, a_granularity, b_granularity): a = torch.randn(*a_shape, dtype=torch.bfloat16, device="cuda") b = torch.randn(64, 32, dtype=torch.bfloat16, device="cuda") @@ -218,18 +235,20 @@ def test_axiswise_gemm(self, a_shape): e4m3_dtype, linear_mm_config, gemm_input_role=GemmInputRole.INPUT, - scaling_granularity=ScalingGranularity.AXISWISE, - axiswise_dim=-1, + scaling_granularity=a_granularity, + axiswise_dim=get_maybe_axiswise_dim(-1, a_granularity), ) a_fp8 = a_fp8.reshape(-1, a_shape[-1]) + b_fp8 = hp_tensor_to_float8_dynamic( b, e4m3_dtype, linear_mm_config, gemm_input_role=GemmInputRole.WEIGHT, - scaling_granularity=ScalingGranularity.AXISWISE, - axiswise_dim=-1, # will be transposed + scaling_granularity=b_granularity, + axiswise_dim=get_maybe_axiswise_dim(-1, b_granularity), ) + c_fp8_compute = torch.mm(a_fp8, b_fp8.t()) a = a.reshape(-1, a_shape[-1]) c_ref = torch.mm(a, b.t()) @@ -322,79 +341,64 @@ def _test_linear_impl( ) @pytest.mark.parametrize( "scaling_type_grad_output", - [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC], - ) - @pytest.mark.parametrize( - "scaling_granularity", - [ScalingGranularity.TENSORWISE, ScalingGranularity.AXISWISE], + [ScalingType.DELAYED, ScalingType.DYNAMIC], ) @pytest.mark.parametrize("linear_dtype", [torch.bfloat16, torch.float32]) @pytest.mark.parametrize("linear_bias", [False, True]) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_linear( + def test_linear_from_config_params( self, 
x_shape, emulate: bool, scaling_type_input: ScalingType, scaling_type_weight: ScalingType, scaling_type_grad_output: ScalingType, - scaling_granularity: ScalingGranularity, linear_dtype: torch.dtype, linear_bias: bool, ): - if scaling_granularity is ScalingGranularity.AXISWISE: - if ( - scaling_type_input != ScalingType.DYNAMIC or - scaling_type_weight != ScalingType.DYNAMIC or - scaling_type_grad_output != ScalingType.DYNAMIC or - linear_dtype != torch.bfloat16 or - (not is_cuda_9_0) - ): - pytest.skip() - x = torch.randn(*x_shape, device="cuda", dtype=linear_dtype) m_ref = nn.Linear(16, 32, bias=linear_bias, device="cuda", dtype=linear_dtype) - if scaling_type_input is ScalingType.STATIC: - cast_config_input = CastConfig( - scaling_type=scaling_type_input, - scaling_granularity=scaling_granularity, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_input = CastConfig( - scaling_type=scaling_type_input, - scaling_granularity=scaling_granularity, - ) - if scaling_type_weight is ScalingType.STATIC: - cast_config_weight = CastConfig( - scaling_type=scaling_type_weight, - scaling_granularity=scaling_granularity, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_weight = CastConfig( - scaling_type=scaling_type_weight, - scaling_granularity=scaling_granularity, - ) - if scaling_type_grad_output is ScalingType.STATIC: - cast_config_grad_output = CastConfig( - scaling_type=scaling_type_grad_output, - scaling_granularity=scaling_granularity, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_grad_output = CastConfig( - scaling_type=scaling_type_grad_output, - scaling_granularity=scaling_granularity, - ) + config = get_test_float8_linear_config( + scaling_type_input, + scaling_type_weight, + scaling_type_grad_output, + emulate, + ) - config = Float8LinearConfig( - cast_config_input=cast_config_input, - cast_config_weight=cast_config_weight, - cast_config_grad_output=cast_config_grad_output, - emulate=emulate, + self._test_linear_impl( + x, + m_ref, + config, ) + + # Note: there are now too many config combinations to test all of + # them, so this function factors out some of the recipes which are annoying + # to combine with the main testing function. + # TODO(future PR): make this cleaner. + @pytest.mark.parametrize( + "recipe_name", + [_Float8LinearRecipeName.ALL_AXISWISE, _Float8LinearRecipeName.LW_AXISWISE_WITH_GW_HP], + ) + @pytest.mark.parametrize("x_shape", [(16, 16), (2, 16, 16), (3, 2, 16, 16)]) + @pytest.mark.parametrize("linear_bias", [True, False]) + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_linear_from_recipe( + self, + recipe_name, + x_shape, + linear_bias: bool, + ): + if torch.cuda.get_device_capability() < (9, 0): + warnings.warn( + f"CUDA capability {torch.cuda.get_device_capability()} < (9.0)" + ) + pytest.skip() + + linear_dtype = torch.bfloat16 + x = torch.randn(*x_shape, device="cuda", dtype=linear_dtype) + m_ref = nn.Linear(16, 32, bias=linear_bias, device="cuda", dtype=linear_dtype) + config = _recipe_name_to_linear_config(recipe_name) self._test_linear_impl( x, m_ref, diff --git a/test/float8/test_compile.py b/test/float8/test_compile.py index 317743288..c235685e6 100644 --- a/test/float8/test_compile.py +++ b/test/float8/test_compile.py @@ -5,6 +5,7 @@ # LICENSE file in the root directory of this source tree. 
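Before moving on to the test_compile.py changes, a small sketch of what the new mixed-granularity parametrization of `test_axiswise_gemm` above exercises: one operand cast with axiswise scaling, the other tensorwise, with `get_maybe_axiswise_dim` suppressing the axiswise dim for the tensorwise cast. Constructing `LinearMMConfig()` with defaults is an assumption (the surrounding test builds it the same way), and the mixed case relies on the `float8_ops.py` scale-broadcast change later in this patch plus an SM90 GPU.

```python
import torch

from torchao.float8.config import ScalingGranularity
from torchao.float8.float8_scaling_utils import (
    get_maybe_axiswise_dim,
    hp_tensor_to_float8_dynamic,
)
from torchao.float8.float8_tensor import GemmInputRole, LinearMMConfig
from torchao.float8.float8_utils import e4m3_dtype

a = torch.randn(16, 32, dtype=torch.bfloat16, device="cuda")
b = torch.randn(64, 32, dtype=torch.bfloat16, device="cuda")
linear_mm_config = LinearMMConfig()  # defaults, as in the surrounding tests

# `a` is scaled axiswise over its last dim, `b` tensorwise;
# get_maybe_axiswise_dim returns None for the tensorwise operand, so the
# axiswise_dim argument can be passed unconditionally
a_fp8 = hp_tensor_to_float8_dynamic(
    a,
    e4m3_dtype,
    linear_mm_config,
    gemm_input_role=GemmInputRole.INPUT,
    scaling_granularity=ScalingGranularity.AXISWISE,
    axiswise_dim=get_maybe_axiswise_dim(-1, ScalingGranularity.AXISWISE),
)
b_fp8 = hp_tensor_to_float8_dynamic(
    b,
    e4m3_dtype,
    linear_mm_config,
    gemm_input_role=GemmInputRole.WEIGHT,
    scaling_granularity=ScalingGranularity.TENSORWISE,
    axiswise_dim=get_maybe_axiswise_dim(-1, ScalingGranularity.TENSORWISE),
)
c_fp8 = torch.mm(a_fp8, b_fp8.t())
```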
import copy import random +from typing import List, Tuple import sys import unittest from io import StringIO @@ -22,7 +23,8 @@ CastConfig, Float8LinearConfig, ScalingType, - ScalingGranularity, + _Float8LinearRecipeName, + _recipe_name_to_linear_config, ) from torchao.float8.float8_linear import Float8Linear from torchao.float8.float8_linear_utils import ( @@ -40,6 +42,7 @@ ScaledMMConfig, ) from torchao.float8.float8_utils import e4m3_dtype +from torchao.testing.float8.test_utils import get_test_float8_linear_config from torch._dynamo.test_case import TestCase as DynamoTestCase from torch._dynamo.testing import CompileCounterWithBackend @@ -59,7 +62,8 @@ def _test_compile_base( x_shape = (16, 16) linear_dtype = torch.bfloat16 - x = torch.randn(*x_shape, device="cuda", dtype=linear_dtype) + x = torch.randn(*x_shape, device="cuda", dtype=linear_dtype).requires_grad_() + x_ref = copy.deepcopy(x) m_ref = nn.Linear(16, 32, bias=True, device="cuda", dtype=linear_dtype) m_fp8 = Float8Linear.from_float( @@ -71,7 +75,7 @@ def _test_compile_base( m_ref = torch.compile(m_ref, backend=backend, fullgraph=fullgraph) y_fp8 = m_fp8(x) y_fp8.sum().backward() - y_ref = m_ref(x) + y_ref = m_ref(x_ref) y_ref.sum().backward() # TODO(future PR): can also test fp8 eager vs compile here with a tigher # tolerance @@ -80,74 +84,7 @@ def _test_compile_base( m_fp8.weight.grad, m_ref.weight.grad, atol=2e-1, rtol=2e-1 ) torch.testing.assert_close(m_fp8.bias.grad, m_ref.bias.grad, atol=8e-2, rtol=8e-2) - -def _get_config( - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - scaling_granularity, - emulate, -): - if scaling_type_input is ScalingType.STATIC: - cast_config_input = CastConfig( - scaling_type=scaling_type_input, - scaling_granularity=scaling_granularity, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_input = CastConfig( - scaling_type=scaling_type_input, - scaling_granularity=scaling_granularity, - ) - if scaling_type_weight is ScalingType.STATIC: - cast_config_weight = CastConfig( - scaling_type=scaling_type_weight, - scaling_granularity=scaling_granularity, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_weight = CastConfig( - scaling_type=scaling_type_weight, - scaling_granularity=scaling_granularity, - ) - if scaling_type_grad_output is ScalingType.STATIC: - cast_config_grad_output = CastConfig( - scaling_type=scaling_type_grad_output, - scaling_granularity=scaling_granularity, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_grad_output = CastConfig( - scaling_type=scaling_type_grad_output, - scaling_granularity=scaling_granularity, - ) - - config = Float8LinearConfig( - cast_config_input=cast_config_input, - cast_config_weight=cast_config_weight, - cast_config_grad_output=cast_config_grad_output, - emulate=emulate, - ) - return config - - -def is_supported( - scaling_granularity, - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - dtype, -) -> bool: - if scaling_granularity is ScalingGranularity.AXISWISE: - if ( - scaling_type_input != ScalingType.DYNAMIC or - scaling_type_weight != ScalingType.DYNAMIC or - scaling_type_grad_output != ScalingType.DYNAMIC or - dtype != torch.bfloat16 or - (not is_H100) - ): - return False - return True + torch.testing.assert_close(x.grad, x_ref.grad, atol=8e-2, rtol=8e-2) @pytest.mark.parametrize("fullgraph", [True]) @@ -160,11 +97,8 @@ def is_supported( @pytest.mark.parametrize( "scaling_type_grad_output", [ScalingType.DELAYED, 
ScalingType.DYNAMIC, ScalingType.STATIC] ) -@pytest.mark.parametrize( - "scaling_granularity", [ScalingGranularity.TENSORWISE, ScalingGranularity.AXISWISE] -) @pytest.mark.parametrize("emulate", [False, True] if is_cuda_8_9 else [True]) -@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float32]) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") def test_eager_only( fullgraph, @@ -172,24 +106,13 @@ def test_eager_only( scaling_type_input: ScalingType, scaling_type_weight: ScalingType, scaling_type_grad_output: ScalingType, - scaling_granularity: ScalingGranularity, dtype: torch.dtype, ): - if not is_supported( - scaling_granularity, - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - dtype, - ): - pytest.skip() - torch._dynamo.reset() - config = _get_config( - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - scaling_granularity, + config = get_test_float8_linear_config( + scaling_type_input, + scaling_type_weight, + scaling_type_grad_output, emulate, ) _test_compile_base( @@ -211,10 +134,7 @@ def test_eager_only( @pytest.mark.parametrize( "scaling_type_grad_output", [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC] ) -@pytest.mark.parametrize( - "scaling_granularity", [ScalingGranularity.TENSORWISE, ScalingGranularity.AXISWISE] -) -@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float32]) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") def test_aot_eager( fullgraph, @@ -222,24 +142,13 @@ def test_aot_eager( scaling_type_input: ScalingType, scaling_type_weight: ScalingType, scaling_type_grad_output: ScalingType, - scaling_granularity: ScalingGranularity, dtype: torch.dtype, ): - if not is_supported( - scaling_granularity, - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - dtype, - ): - pytest.skip() - torch._dynamo.reset() - config = _get_config( - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - scaling_granularity, + config = get_test_float8_linear_config( + scaling_type_input, + scaling_type_weight, + scaling_type_grad_output, emulate, ) _test_compile_base( @@ -261,35 +170,21 @@ def test_aot_eager( @pytest.mark.parametrize( "scaling_type_grad_output", [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC] ) -@pytest.mark.parametrize( - "scaling_granularity", [ScalingGranularity.TENSORWISE, ScalingGranularity.AXISWISE] -) @unittest.skipIf(not torch.cuda.is_available() or not is_cuda_8_9, "CUDA with float8 support not available") -@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32]) -def test_inductor( +@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float32]) +def test_inductor_from_config_params( fullgraph, emulate: bool, scaling_type_input: ScalingType, scaling_type_weight: ScalingType, scaling_type_grad_output: ScalingType, - scaling_granularity: ScalingGranularity, dtype: torch.dtype, ): - if not is_supported( - scaling_granularity, - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - dtype, - ): - pytest.skip() - torch._dynamo.reset() - config = _get_config( - scaling_type_input, - scaling_type_weight, - scaling_type_grad_output, - scaling_granularity, + config = get_test_float8_linear_config( + scaling_type_input, + scaling_type_weight, + scaling_type_grad_output, emulate, ) 
_test_compile_base( @@ -299,6 +194,27 @@ def test_inductor( dtype, ) +# Note: there are now too many config combinations to test all of +# them, so this function factors out some of the recipes which are annoying +# to combine with the main testing function. +# TODO(future PR): make this cleaner. +@pytest.mark.parametrize( + "recipe_name", + [_Float8LinearRecipeName.ALL_AXISWISE, _Float8LinearRecipeName.LW_AXISWISE_WITH_GW_HP], +) +@unittest.skipIf(not is_H100, "CUDA with capability 9.0 or greater not available") +def test_inductor_from_recipe(recipe_name): + torch._dynamo.reset() + config = _recipe_name_to_linear_config(recipe_name) + fullgraph = True + dtype = torch.bfloat16 + _test_compile_base( + "inductor", + fullgraph, + config, + dtype, + ) + class TestGraphBreaks(DynamoTestCase): class MockLinear(torch.nn.Module): diff --git a/test/float8/test_numerics_integration.py b/test/float8/test_numerics_integration.py index 07fcddaad..0cdd3a56d 100644 --- a/test/float8/test_numerics_integration.py +++ b/test/float8/test_numerics_integration.py @@ -24,6 +24,8 @@ Float8LinearConfig, ScalingType, ScalingGranularity, + _Float8LinearRecipeName, + _recipe_name_to_linear_config, ) from torchao.float8.float8_linear_utils import ( convert_to_float8_training, @@ -31,6 +33,7 @@ sync_float8_amax_and_scale_history, ) from torchao.float8.float8_utils import compute_error, IS_ROCM +from torchao.testing.float8.test_utils import get_test_float8_linear_config is_cuda_8_9 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 9) is_cuda_9_0 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0) @@ -84,44 +87,9 @@ def init_weights(self, init_std: float): class TestFloat8NumericsIntegrationTest: - @pytest.mark.parametrize( - "scaling_type_input", - [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC], - ) - @pytest.mark.parametrize( - "scaling_type_weight", - [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC], - ) - @pytest.mark.parametrize( - "scaling_type_grad_output", - [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC], - ) - @pytest.mark.parametrize( - "scaling_granularity", - [ScalingGranularity.TENSORWISE, ScalingGranularity.AXISWISE], - ) - @pytest.mark.skipif(not is_cuda_8_9, reason="requires SM89 compatible machine") - @pytest.mark.skipif(IS_ROCM, reason="test doesn't currently work on the ROCm stack") - def test_encoder_fw_bw( - self, - scaling_type_input: ScalingType, - scaling_type_weight: ScalingType, - scaling_type_grad_output: ScalingType, - scaling_granularity: ScalingGranularity, - ): - # TODO(later): maybe add float16 back if it becomes important - data_dtype = torch.bfloat16 - - if scaling_granularity is ScalingGranularity.AXISWISE: - if ( - scaling_type_input != ScalingType.DYNAMIC or - scaling_type_weight != ScalingType.DYNAMIC or - scaling_type_grad_output != ScalingType.DYNAMIC or - data_dtype != torch.bfloat16 or - (not is_cuda_9_0) - ): - pytest.skip() + def _test_impl(self, config: Float8LinearConfig) -> None: + data_dtype = torch.bfloat16 # LLaMa 3 70B shapes model_ref = ( FeedForward( @@ -137,44 +105,6 @@ def test_encoder_fw_bw( # for now just test the encoder to simplify things model_fp8 = copy.deepcopy(model_ref) - if scaling_type_input is ScalingType.STATIC: - cast_config_input = CastConfig( - scaling_type=scaling_type_input, - scaling_granularity=scaling_granularity, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_input = CastConfig( - scaling_type=scaling_type_input, - 
scaling_granularity=scaling_granularity, - ) - if scaling_type_weight is ScalingType.STATIC: - cast_config_weight = CastConfig( - scaling_type=scaling_type_weight, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_weight = CastConfig( - scaling_type=scaling_type_weight, - scaling_granularity=scaling_granularity, - ) - if scaling_type_grad_output is ScalingType.STATIC: - cast_config_grad_output = CastConfig( - scaling_type=scaling_type_grad_output, - static_scale=torch.tensor([1.0], device="cuda"), - ) - else: - cast_config_grad_output = CastConfig( - scaling_type=scaling_type_grad_output, - scaling_granularity=scaling_granularity, - ) - - config = Float8LinearConfig( - cast_config_input=cast_config_input, - cast_config_weight=cast_config_weight, - cast_config_grad_output=cast_config_grad_output, - ) - convert_to_float8_training( model_fp8, config=config, @@ -212,9 +142,9 @@ def test_encoder_fw_bw( out_sqnr = compute_error(model_ref_out, model_fp8_out) any_static_scaling = ( - scaling_type_input is ScalingType.STATIC - or scaling_type_weight is ScalingType.STATIC - or scaling_type_grad_output is ScalingType.STATIC + config.cast_config_input.scaling_type is ScalingType.STATIC + or config.cast_config_weight.scaling_type is ScalingType.STATIC + or config.cast_config_grad_output.scaling_type is ScalingType.STATIC ) if any_static_scaling: assert out_sqnr > 10.0 @@ -236,6 +166,47 @@ def test_encoder_fw_bw( sqnr = compute_error(ref_grad, cur_grad) assert sqnr > grad_sqnr_threshold + @pytest.mark.parametrize( + "scaling_type_input", + [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC], + ) + @pytest.mark.parametrize( + "scaling_type_weight", + [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC], + ) + @pytest.mark.parametrize( + "scaling_type_grad_output", + [ScalingType.DELAYED, ScalingType.DYNAMIC, ScalingType.STATIC], + ) + @pytest.mark.skipif(not is_cuda_8_9, reason="requires SM89 compatible machine") + @pytest.mark.skipif(IS_ROCM, reason="test doesn't currently work on the ROCm stack") + def test_encoder_fw_bw_from_config_params( + self, + scaling_type_input: ScalingType, + scaling_type_weight: ScalingType, + scaling_type_grad_output: ScalingType, + ): + config = get_test_float8_linear_config( + scaling_type_input, + scaling_type_weight, + scaling_type_grad_output, + emulate=False, + ) + self._test_impl(config) + + @pytest.mark.parametrize( + "recipe_name", + [_Float8LinearRecipeName.ALL_AXISWISE, _Float8LinearRecipeName.LW_AXISWISE_WITH_GW_HP], + ) + @pytest.mark.skipif(not is_cuda_9_0, reason="requires SM90 compatible machine") + @pytest.mark.skipif(IS_ROCM, reason="test doesn't currently work on the ROCm stack") + def test_encoder_fw_bw_from_recipe( + self, + recipe_name: str, + ): + config = _recipe_name_to_linear_config(recipe_name) + self._test_impl(config) + if __name__ == "__main__": pytest.main([__file__]) diff --git a/torchao/float8/config.py b/torchao/float8/config.py index 0ed6d2622..37a017683 100644 --- a/torchao/float8/config.py +++ b/torchao/float8/config.py @@ -48,12 +48,16 @@ def short_str(self): @dataclass(frozen=True) class CastConfig: """ - Configuration for casting a single tensor to float8 + Configuration for maybe casting a single tensor to float8 """ scaling_type: ScalingType = ScalingType.DYNAMIC scaling_granularity: ScalingGranularity = ScalingGranularity.TENSORWISE static_scale: Optional[torch.Tensor] = None + # If True, this tensor is not scaled to float8 and left in its original + # precision. 
+ # TODO(ideally before this PR lands): a better name for this + keep_in_original_precision: bool = False def __post_init__(self): if self.scaling_type is ScalingType.STATIC: @@ -99,7 +103,7 @@ class Float8GemmConfig: use_fast_accum: bool = False -@dataclass(frozen=True) +@dataclass(frozen=False) class Float8LinearConfig: """ Configuration for converting a `torch.nn.Linear` module to float8 @@ -113,9 +117,23 @@ class Float8LinearConfig: cast_config_weight: CastConfig = CastConfig() cast_config_grad_output: CastConfig = CastConfig() + # + # Optional per-tensor configuration for `input`, `weight`, `grad_output` to + # calculate `grad_weight`, `grad_input`, and `grad_weight` respectively. + # If not specified, then the configuration from `cast_config_input`, + # `cast_config_weight` and `cast_config_grad_output`, respectively, is reused. + # TODO(future PR): maybe rename `cast_config_input` to + # `cast_config_input_for_output`, etc, to make the names consistent, + # will be BC-breaking. + # + cast_config_input_for_grad_weight: Optional[CastConfig] = None + cast_config_weight_for_grad_input: Optional[CastConfig] = None + cast_config_grad_output_for_grad_weight: Optional[CastConfig] = None + # # Per-gemm configuration for gemms calculating `output`, `grad_input` and # `grad_weight` + # TODO(this PR): throw warning if fast_accum False is used with axiswise scaling # gemm_config_output: Float8GemmConfig = Float8GemmConfig(use_fast_accum=True) gemm_config_grad_input: Float8GemmConfig = Float8GemmConfig() @@ -174,28 +192,134 @@ class Float8LinearConfig: force_recompute_fp8_weight_in_bwd: bool = False def __post_init__(self): + # populate the additional cast overrides, if the user did not specify them + if self.cast_config_input_for_grad_weight is None: + self.cast_config_input_for_grad_weight = self.cast_config_input + if self.cast_config_weight_for_grad_input is None: + self.cast_config_weight_for_grad_input = self.cast_config_weight + if self.cast_config_grad_output_for_grad_weight is None: + self.cast_config_grad_output_for_grad_weight = self.cast_config_grad_output + # float8 all-gather only supports tensorwise, in the future may support blockwise if self.cast_config_weight.scaling_granularity != ScalingGranularity.TENSORWISE: assert not self.enable_fsdp_float8_all_gather, \ f"enable_fsdp_float8_all_gather only supports tensorwise scaling granularity, got {self.cast_config_weight.scaling_granularity}" - # for now, axiswise granularity is all-or-nothing - # TODO(future PR): enable more granular setting per-gemm-input - has_any_axiswise_scaling = ( - self.cast_config_input.scaling_granularity is ScalingGranularity.AXISWISE or - self.cast_config_weight.scaling_granularity is ScalingGranularity.AXISWISE or - self.cast_config_grad_output.scaling_granularity is ScalingGranularity.AXISWISE - ) - has_all_axiswise_scaling = ( - self.cast_config_input.scaling_granularity is ScalingGranularity.AXISWISE and - self.cast_config_weight.scaling_granularity is ScalingGranularity.AXISWISE and - self.cast_config_grad_output.scaling_granularity is ScalingGranularity.AXISWISE - ) - if has_any_axiswise_scaling: - assert has_all_axiswise_scaling, \ - "for now, axiswise scaling must be enabled for either all casts or none of the casts" + # save some characters in the compatibility checks below + cc_i = self.cast_config_input + cc_w = self.cast_config_weight + cc_go = self.cast_config_grad_output + cc_i_gw = self.cast_config_input_for_grad_weight + cc_w_gi = self.cast_config_weight_for_grad_input + cc_go_gw = 
self.cast_config_grad_output_for_grad_weight + + # for now, we only have gemm kernels where both operands are either both + # in high precision, or both in float8. In the future, this may be relaxed. + # TODO(future): make the float8 check more precise with the specific dtypes. + assert cc_i.keep_in_original_precision == cc_w.keep_in_original_precision, \ + "incompatible operand precision for output" + assert cc_go.keep_in_original_precision == cc_w_gi.keep_in_original_precision, \ + "incompatible operand precision for grad_input" + assert cc_i_gw.keep_in_original_precision == cc_go_gw.keep_in_original_precision, \ + "incompatible operand precision for grad_weight" + # If True, use 'fnuz' float8 types for calculations. # Currently, ROCm only supports fnuz variants. # TODO(future PR): move this to Float8LinearConfig use_fnuz_dtype = False + + +# Pre-made recipes for common configurations +# TODO(future PR): go through a round of design on this, and eventually expose +# as a top level public API. +class _Float8LinearRecipeName(enum.Enum): + ALL_TENSORWISE = "all_tensorwise" + ALL_AXISWISE = "all_axiswise" + LW_AXISWISE_WITH_GW_HP = "lw_axiswise_with_gw_hp" + + +def _recipe_name_to_linear_config( + recipe_name: _Float8LinearRecipeName, +) -> Float8LinearConfig: + """ + Input: `_Float8LinearRecipeName` value + Output: a `Float8LinearConfig` configured to implement the recipe + """ + + if recipe_name is _Float8LinearRecipeName.ALL_TENSORWISE: + # Default, dynamic per-tensor scaling with the cuBLAS tensorwise kernel + return Float8LinearConfig() + + elif recipe_name is _Float8LinearRecipeName.ALL_AXISWISE: + # dynamic axiswise scaling with the CUTLASS rowwise kernel + cc_i = CastConfig(scaling_granularity=ScalingGranularity.AXISWISE) + cc_w = CastConfig(scaling_granularity=ScalingGranularity.AXISWISE) + cc_go = CastConfig(scaling_granularity=ScalingGranularity.AXISWISE) + + # The current rowwise CUTLASS kernels in `torch._scaled_mm` are only + # fast with `use_fast_accum=True`. Note that rowwise scaling is more + # accurate than tensorwise scaling, so the overall impact on accuracy + # of tensorwise vs rowwise taking this flag into account will vary. 
+ gc_o = Float8GemmConfig(use_fast_accum=True) + gc_gi = Float8GemmConfig(use_fast_accum=True) + gc_gw = Float8GemmConfig(use_fast_accum=True) + + return Float8LinearConfig( + cast_config_input=cc_i, + cast_config_weight=cc_w, + cast_config_grad_output=cc_go, + gemm_config_output=gc_o, + gemm_config_grad_input=gc_gi, + gemm_config_grad_weight=gc_gw, + ) + + elif recipe_name is _Float8LinearRecipeName.LW_AXISWISE_WITH_GW_HP: + + # lw's recipe for a modification on all-axiswise: + # + # output_hp = input_fp8_axiswise_dim0 @ weight_t_axiswise_dim1 + # grad_input_hp = grad_output_fp8_axiswise_dim0 @ weight_fp8_tensorwise + # grad_weight_hp = input_t_hp @ grad_output_hp + # + # key characteristics: + # * increased accuracy for grad_weight + # * `output` and `weight` now only need to be scaled axiswise across a + # single dim compared to vanilla all-axiswise, which is more + # amenable to fast kernels + + # output_hp = input_fp8_axiswise_dim0 @ weight_t_axiswise_dim1 + cc_i = CastConfig(scaling_granularity=ScalingGranularity.AXISWISE) + cc_w = CastConfig(scaling_granularity=ScalingGranularity.AXISWISE) + + # grad_input_hp = grad_output_fp8_axiswise_dim0 @ weight_fp8_tensorwise + cc_go = CastConfig(scaling_granularity=ScalingGranularity.AXISWISE) + cc_w_gi = CastConfig(scaling_granularity=ScalingGranularity.TENSORWISE) + + # grad_weight_hp = input_t_hp @ grad_output_hp + cc_i_gw = CastConfig(keep_in_original_precision=True) + cc_go_gw = CastConfig(keep_in_original_precision=True) + + # The current rowwise CUTLASS kernels in `torch._scaled_mm` are only + # fast with `use_fast_accum=True`. Note that rowwise scaling is more + # accurate than tensorwise scaling, so the overall impact on accuracy + # of tensorwise vs rowwise taking this flag into account will vary. + gc_o = Float8GemmConfig(use_fast_accum=True) + gc_gi = Float8GemmConfig(use_fast_accum=True) + gc_gw = Float8GemmConfig(use_fast_accum=True) + + return Float8LinearConfig( + cast_config_input=cc_i, + cast_config_weight=cc_w, + cast_config_grad_output=cc_go, + cast_config_input_for_grad_weight=cc_i_gw, + cast_config_weight_for_grad_input=cc_w_gi, + cast_config_grad_output_for_grad_weight=cc_go_gw, + gemm_config_output=gc_o, + gemm_config_grad_input=gc_gi, + gemm_config_grad_weight=gc_gw, + ) + + else: + # TODO(before land): make recipe_name an enum and tell users what the options are + raise AssertionError(f"unknown recipe_name {recipe_name}") diff --git a/torchao/float8/float8_linear.py b/torchao/float8/float8_linear.py index 9aaffa99c..e8a5bf419 100644 --- a/torchao/float8/float8_linear.py +++ b/torchao/float8/float8_linear.py @@ -23,6 +23,7 @@ hp_tensor_to_float8_delayed, hp_tensor_to_float8_dynamic, hp_tensor_to_float8_static, + get_maybe_axiswise_dim, NoopFwToFloat8E5M2BwDelayed, NoopFwToFloat8E5M2BwDynamic, NoopFwToFloat8E5M2BwStatic, @@ -37,10 +38,12 @@ ) from torchao.float8.float8_utils import ( - e4m3_dtype, - e5m2_dtype, + e4m3_dtype, + e5m2_dtype, tensor_to_amax, tensor_to_scale, + float8_linear_config_to_concise_casts_config, + Float8LinearConciseCastsConfig, ) from torchao.float8.fsdp_utils import ( @@ -122,54 +125,56 @@ class manual_float8_matmul_with_args_in_hp(torch.autograd.Function): and other granularities in a separate PR. 
""" - # TODO(this PR): types of inputs @staticmethod def forward( ctx, input_hp: torch.Tensor, weight_hp_t: torch.Tensor, linear_mm_config: LinearMMConfig, - input_scaling_granularity: ScalingGranularity, - weight_scaling_granularity: ScalingGranularity, - grad_output_scaling_granularity: ScalingGranularity, + concise_casts_config: Float8LinearConciseCastsConfig, ): ctx.save_for_backward(input_hp, weight_hp_t) ctx.linear_mm_config = linear_mm_config - ctx.input_scaling_granularity = input_scaling_granularity - ctx.weight_scaling_granularity = weight_scaling_granularity - ctx.grad_output_scaling_granularity = grad_output_scaling_granularity - - input_fp8 = hp_tensor_to_float8_dynamic( - input_hp, - e4m3_dtype, - linear_mm_config, - gemm_input_role=GemmInputRole.INPUT, - scaling_granularity=input_scaling_granularity, - axiswise_dim=-1, - ) + ctx.concise_casts_config = concise_casts_config - weight_fp8_t = hp_tensor_to_float8_dynamic( - weight_hp_t, - e4m3_dtype, - linear_mm_config, - gemm_input_role=GemmInputRole.WEIGHT, - scaling_granularity=weight_scaling_granularity, - axiswise_dim=0, - ) + c = concise_casts_config + + if c.cc_i.orig_prec: + input_maybe_fp8 = input_hp + else: + input_maybe_fp8 = hp_tensor_to_float8_dynamic( + input_hp, + e4m3_dtype, + linear_mm_config, + gemm_input_role=GemmInputRole.INPUT, + scaling_granularity=c.cc_i.sc_gr, + axiswise_dim=get_maybe_axiswise_dim(-1, c.cc_i.sc_gr), + ) + + if c.cc_w.orig_prec: + weight_maybe_fp8_t = weight_hp_t + else: + weight_maybe_fp8_t = hp_tensor_to_float8_dynamic( + weight_hp_t, + e4m3_dtype, + linear_mm_config, + gemm_input_role=GemmInputRole.WEIGHT, + scaling_granularity=c.cc_w.sc_gr, + axiswise_dim=get_maybe_axiswise_dim(0, c.cc_w.sc_gr), + ) # the reshapes are needed in order to make the shapes compatible with # torch.mm - orig_shape = input_fp8.shape - input_fp8_reshaped = input_fp8.reshape(-1, orig_shape[-1]) - res_bits = torch.mm(input_fp8_reshaped, weight_fp8_t) + orig_shape = input_maybe_fp8.shape + input_maybe_fp8_reshaped = input_maybe_fp8.reshape(-1, orig_shape[-1]) + res_bits = torch.mm(input_maybe_fp8_reshaped, weight_maybe_fp8_t) res_bits = res_bits.reshape(*orig_shape[:-1], res_bits.shape[-1]) return res_bits @staticmethod def backward(ctx, grad_output): input_hp, weight_hp_t = ctx.saved_tensors - - # TODO scaling + c = ctx.concise_casts_config # the reshapes are needed in order to make the shapes compatible with # torch.mm @@ -182,26 +187,37 @@ def backward(ctx, grad_output): # calculate grad_input # - grad_output_reshaped_fp8_dim0 = hp_tensor_to_float8_dynamic( - grad_output_reshaped, - e5m2_dtype, - ctx.linear_mm_config, - gemm_input_role=GemmInputRole.GRAD_OUTPUT, - scaling_granularity=ctx.grad_output_scaling_granularity, - axiswise_dim=-1, - ) - weight_t_fp8_dim0 = hp_tensor_to_float8_dynamic( - weight_hp_t, - e4m3_dtype, - ctx.linear_mm_config, - gemm_input_role=GemmInputRole.WEIGHT, - scaling_granularity=ctx.weight_scaling_granularity, - axiswise_dim=-1, # will be transposed - ) + if c.cc_go.orig_prec: + grad_output_reshaped_maybe_fp8_dim0 = grad_output_reshaped + else: + grad_output_reshaped_maybe_fp8_dim0 = hp_tensor_to_float8_dynamic( + grad_output_reshaped, + e5m2_dtype, + ctx.linear_mm_config, + gemm_input_role=GemmInputRole.GRAD_OUTPUT, + scaling_granularity=c.cc_go.sc_gr, + axiswise_dim=get_maybe_axiswise_dim(-1, c.cc_go.sc_gr), + ) + + if c.cc_w_gi.orig_prec: + weight_t_maybe_fp8_dim0 = weight_hp_t + else: + # Note: we need https://github.com/pytorch/pytorch/issues/136267 + # to be solved to have a 
chance to reuse max(abs(weight, dim=...)) + # from the forward to get max(abs(weight)) here without reading + # the entire tensor. + weight_t_maybe_fp8_dim0 = hp_tensor_to_float8_dynamic( + weight_hp_t, + e4m3_dtype, + ctx.linear_mm_config, + gemm_input_role=GemmInputRole.WEIGHT, + scaling_granularity=c.cc_w_gi.sc_gr, + axiswise_dim=get_maybe_axiswise_dim(-1, c.cc_w_gi.sc_gr), + ) grad_input = torch.mm( - grad_output_reshaped_fp8_dim0, - weight_t_fp8_dim0.t(), + grad_output_reshaped_maybe_fp8_dim0, + weight_t_maybe_fp8_dim0.t(), ) grad_input = grad_input.reshape( *grad_output_orig_shape[:-1], grad_input.shape[-1] @@ -214,29 +230,38 @@ def backward(ctx, grad_output): # calculate grad_weight # - grad_output_reshaped_fp8_dim1 = hp_tensor_to_float8_dynamic( - grad_output_reshaped, - e5m2_dtype, - ctx.linear_mm_config, - gemm_input_role=GemmInputRole.GRAD_OUTPUT, - scaling_granularity=ctx.grad_output_scaling_granularity, - axiswise_dim=0, # will be transposed - ) - input_reshaped_fp8_dim1 = hp_tensor_to_float8_dynamic( - input_hp_reshaped, - e4m3_dtype, - ctx.linear_mm_config, - gemm_input_role=GemmInputRole.INPUT, - scaling_granularity=ctx.input_scaling_granularity, - axiswise_dim=0, - ) + if c.cc_go_gw.orig_prec: + grad_output_reshaped_maybe_fp8_dim1 = grad_output_reshaped + else: + grad_output_reshaped_maybe_fp8_dim1 = hp_tensor_to_float8_dynamic( + grad_output_reshaped, + e5m2_dtype, + ctx.linear_mm_config, + gemm_input_role=GemmInputRole.GRAD_OUTPUT, + scaling_granularity=c.cc_go_gw.sc_gr, + axiswise_dim=get_maybe_axiswise_dim(0, c.cc_go_gw.sc_gr), + ) + + if c.cc_i_gw.orig_prec: + input_reshaped_maybe_fp8_dim1 = input_hp_reshaped + else: + input_reshaped_maybe_fp8_dim1 = hp_tensor_to_float8_dynamic( + input_hp_reshaped, + e4m3_dtype, + ctx.linear_mm_config, + gemm_input_role=GemmInputRole.INPUT, + scaling_granularity=c.cc_i_gw.sc_gr, + axiswise_dim=get_maybe_axiswise_dim(0, c.cc_i_gw.sc_gr), + ) grad_weight = torch.mm( - grad_output_reshaped_fp8_dim1.t(), - input_reshaped_fp8_dim1, + grad_output_reshaped_maybe_fp8_dim1.t(), + input_reshaped_maybe_fp8_dim1, ) - return grad_input, grad_weight.t(), None, None, None, None + empty_grads = None, None + + return grad_input, grad_weight.t(), *empty_grads class Float8Linear(torch.nn.Linear): @@ -321,6 +346,9 @@ def __init__(self, *args, **kwargs): # would be initialized in every iteration. 
self.enable_pre_and_post_forward = self.config.enable_pre_and_post_forward + self.concise_casts_config: Float8LinearConciseCastsConfig = \ + float8_linear_config_to_concise_casts_config(self.config) + def create_buffers(self): # Default values for history buffers, see above TODO history_len = self.config.delayed_scaling_config.history_len @@ -568,9 +596,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: input, self.weight.t(), self.linear_mm_config, - self.config.cast_config_input.scaling_granularity, - self.config.cast_config_weight.scaling_granularity, - self.config.cast_config_grad_output.scaling_granularity, + self.concise_casts_config, ) if self.bias is not None: @@ -588,10 +614,14 @@ def scaling_type_repr(self): def scaling_granularity_repr(self): # add scaling granularity settings without using too many characters # example: "i:ten,w:ten,g:ten" or "i:axs,w:axs,g:axs" - gi = self.config.cast_config_input.scaling_granularity.short_str() - gw = self.config.cast_config_weight.scaling_granularity.short_str() - ggo = self.config.cast_config_grad_output.scaling_granularity.short_str() - return f"i:{gi},w:{gw},go:{ggo}" + c = self.config + gi = c.cast_config_input.scaling_granularity.short_str() + gw = c.cast_config_weight.scaling_granularity.short_str() + ggo = c.cast_config_grad_output.scaling_granularity.short_str() + gi2 = c.cast_config_input_for_grad_weight.scaling_granularity.short_str() + gw2 = c.cast_config_weight_for_grad_input.scaling_granularity.short_str() + ggo2 = c.cast_config_grad_output_for_grad_weight.scaling_granularity.short_str() + return f"i:{gi},w:{gw},go:{ggo},i2:{gi2},w2:{gw2},go2:{ggo2}" def extra_repr(self): s = f'{super().extra_repr()}, scaling_type="{self.scaling_type_repr()}", scaling_granularity="{self.scaling_granularity_repr()}"' diff --git a/torchao/float8/float8_ops.py b/torchao/float8/float8_ops.py index b97d03211..8f5bc768e 100644 --- a/torchao/float8/float8_ops.py +++ b/torchao/float8/float8_ops.py @@ -251,6 +251,20 @@ def preprocess_addmm(a: Float8Tensor, b: Float8Tensor): if is_row_major(b_data.stride()): b_data = b_data.t().contiguous().t() b_scale = b._scale + + # Today, torch._scaled_mm only supports both operands using the + # same granularity. The code below checks for cases where one + # operand is scaled axiswise and one tensorwise. If this case is found, + # we reshape the tensorwise scale to be repeated along the needed axis, + # so that torch._scaled_mm can call the axiswise-axiswise kernel. + # Note: using shape/size info does not work with compile here, which is + # why we are inferring the scaling type from the presence of + # axiswise_dim. + if a._axiswise_dim is None and b._axiswise_dim is not None: + a_scale = a_scale.repeat(a_data.shape[0]).reshape(-1, 1) + elif a._axiswise_dim is not None and b._axiswise_dim is None: + b_scale = b_scale.repeat(b_data.shape[1]).reshape(1, -1) + return a_data, a_scale, b_data, b_scale diff --git a/torchao/float8/float8_scaling_utils.py b/torchao/float8/float8_scaling_utils.py index 3207c0c9f..fc22a4e35 100644 --- a/torchao/float8/float8_scaling_utils.py +++ b/torchao/float8/float8_scaling_utils.py @@ -143,6 +143,21 @@ def hp_tensor_to_float8_static( ) + +def get_maybe_axiswise_dim( + axiswise_dim: int, + scaling_granularity: ScalingGranularity, +) -> Optional[int]: + """ + Convenience function which takes in an axiswise dim which is only relevant + for axiswise scaling, and a scaling type. The output is pass-through + if scaling type is axiswise, and None otherwise.
This is done to keep the + logic from choosing the axiswise dim out of the scaling function. + """ + if scaling_granularity is ScalingGranularity.AXISWISE: + return axiswise_dim + return None + + def _maybe_initialize_amaxes_scales_for_float8_cast( x, cur_amax, diff --git a/torchao/float8/float8_utils.py b/torchao/float8/float8_utils.py index b6f42c508..1ee0101ad 100644 --- a/torchao/float8/float8_utils.py +++ b/torchao/float8/float8_utils.py @@ -4,7 +4,7 @@ # This source code is licensed under the BSD 3-Clause license found in the # LICENSE file in the root directory of this source tree. -from typing import Iterable, Literal, Optional, Tuple, Union +from typing import Iterable, Literal, NamedTuple, Optional, Tuple, Union import torch import torch.distributed as dist @@ -264,3 +264,61 @@ def pad_tensor_for_matmul( pad_dim2 = dim2_aligned - dim2 return torch.nn.functional.pad(tensor, (0, pad_dim2, 0, pad_dim1)) + + +# The code below introduces a bit of duplication with Float8LinearConfig in +# order to improve readability of the implementation of how Float8Linear +# uses the config. Specifically, we do two things: +# 1. wrap the relevant parts of configs in namedtuple, so we can pass +# them around in compile-friendly code. +# 2. make the tuple key names more brief, to make the implementation +# code less verbose (the code was so verbose that I felt the need +# to add this workaround). +# As I was writing this, it became less and less clear on why not just have +# a namedtuple as a top level config. Punting that to a future PR as +# that might be BC-breaking, but probably worth exploring. +# Note: I also think below is pretty hacky, it's good enough to unblock +# further prototyping, but IMO pretty important to clean up sooner rather +# than later. + + +class ConciseCastConfig(NamedTuple): + sc_tp: config.ScalingType + sc_gr: config.ScalingGranularity + st_sc: Optional[torch.Tensor] + orig_prec: bool + + @classmethod + def from_cast_config(cls, c: config.CastConfig): + return cls( + sc_tp=c.scaling_type, + sc_gr=c.scaling_granularity, + st_sc=c.static_scale, + orig_prec=c.keep_in_original_precision, + ) + + +class Float8LinearConciseCastsConfig(NamedTuple): + cc_i: ConciseCastConfig + cc_w: ConciseCastConfig + cc_go: ConciseCastConfig + cc_i_gw: ConciseCastConfig + cc_w_gi: ConciseCastConfig + cc_go_gw: ConciseCastConfig + + +def float8_linear_config_to_concise_casts_config( + c: config.Float8LinearConfig, +) -> Float8LinearConciseCastsConfig: + concise_config = Float8LinearConciseCastsConfig( + cc_i=ConciseCastConfig.from_cast_config(c.cast_config_input), + cc_w=ConciseCastConfig.from_cast_config(c.cast_config_weight), + cc_go=ConciseCastConfig.from_cast_config(c.cast_config_grad_output), + cc_i_gw=ConciseCastConfig.from_cast_config(c.cast_config_input_for_grad_weight), + cc_w_gi=ConciseCastConfig.from_cast_config(c.cast_config_weight_for_grad_input), + cc_go_gw=ConciseCastConfig.from_cast_config( + c.cast_config_grad_output_for_grad_weight + ), + ) + + return concise_config diff --git a/torchao/testing/float8/test_utils.py b/torchao/testing/float8/test_utils.py new file mode 100644 index 000000000..7f37c3f30 --- /dev/null +++ b/torchao/testing/float8/test_utils.py @@ -0,0 +1,50 @@ +import torch +from torchao.float8.config import ( + ScalingGranularity, + ScalingType, + CastConfig, + Float8LinearConfig, +) + + +def get_test_float8_linear_config( + scaling_type_input, + scaling_type_weight, + scaling_type_grad_output, + emulate: bool, +): + static_scale_one = torch.tensor([1.0], 
device="cuda") + + if scaling_type_input is ScalingType.STATIC: + static_scale_input = static_scale_one + else: + static_scale_input = None + if scaling_type_weight is ScalingType.STATIC: + static_scale_weight = static_scale_one + else: + static_scale_weight = None + if scaling_type_grad_output is ScalingType.STATIC: + static_scale_grad_output = static_scale_one + else: + static_scale_grad_output = None + + cast_config_input = CastConfig( + scaling_type=scaling_type_input, + static_scale=static_scale_input, + ) + cast_config_weight = CastConfig( + scaling_type=scaling_type_weight, + static_scale=static_scale_weight, + ) + cast_config_grad_output = CastConfig( + scaling_type=scaling_type_grad_output, + static_scale=static_scale_grad_output, + ) + + config = Float8LinearConfig( + cast_config_input=cast_config_input, + cast_config_weight=cast_config_weight, + cast_config_grad_output=cast_config_grad_output, + emulate=emulate, + ) + return config