diff --git a/sw/apps/atax/scripts/datagen.py b/sw/apps/atax/scripts/datagen.py
index fbed6fa5ba..ffa633ff39 100755
--- a/sw/apps/atax/scripts/datagen.py
+++ b/sw/apps/atax/scripts/datagen.py
@@ -21,7 +21,7 @@ class AtaxDataGen(du.DataGen):
     def golden_model(self, A, x):
         return np.matmul(A.transpose(), np.matmul(A, x))
 
-    def validate_config(self, M, N, **kwargs):
+    def validate(self, M, N, **kwargs):
         assert (N % 8) == 0, "N must be an integer multiple of the number of cores"
 
         # Calculate total TCDM occupation
@@ -39,7 +39,7 @@ def emit_header(self, **kwargs):
         header = [super().emit_header()]
 
         # Validate parameters
-        self.validate_config(**kwargs)
+        self.validate(**kwargs)
 
         M, N = kwargs['M'], kwargs['N']
         A = du.generate_random_array((M, N))
diff --git a/sw/apps/correlation/scripts/datagen.py b/sw/apps/correlation/scripts/datagen.py
index 0f8653aed5..57d18fac8b 100755
--- a/sw/apps/correlation/scripts/datagen.py
+++ b/sw/apps/correlation/scripts/datagen.py
@@ -21,7 +21,7 @@ class CorrelationDataGen(du.DataGen):
     def golden_model(self, data):
         return np.corrcoef(data, rowvar=False)
 
-    def validate_config(self, M, N, **kwargs):
+    def validate(self, M, N, **kwargs):
         assert (M % 8) == 0, "M must be an integer multiple of the number of cores"
 
         # Calculate total TCDM occupation
@@ -37,7 +37,7 @@ def emit_header(self, **kwargs):
         header = [super().emit_header()]
 
         # Validate parameters
-        self.validate_config(**kwargs)
+        self.validate(**kwargs)
 
         M, N = kwargs['M'], kwargs['N']
         data = du.generate_random_array((N, M))
diff --git a/sw/apps/kmeans/scripts/datagen.py b/sw/apps/kmeans/scripts/datagen.py
index f0053eb6d9..f99f043c0f 100755
--- a/sw/apps/kmeans/scripts/datagen.py
+++ b/sw/apps/kmeans/scripts/datagen.py
@@ -57,7 +57,7 @@ def visualize_clusters(self, samples, centroids, title=None):
         plt.ylabel("Feature 2")
         plt.show()
 
-    def validate_config(self, **kwargs):
+    def validate(self, **kwargs):
         assert (kwargs['n_samples'] % 8) == 0, 'Number of samples must be a multiple of the' \
                                                ' number of cores'
 
@@ -65,7 +65,7 @@ def emit_header(self, **kwargs):
         header = [super().emit_header()]
 
         # Validate parameters
-        self.validate_config(**kwargs)
+        self.validate(**kwargs)
 
         # Aliases
         n_samples = kwargs['n_samples']
diff --git a/sw/blas/axpy/scripts/datagen.py b/sw/blas/axpy/scripts/datagen.py
index 38634dd5ee..30b179fec4 100755
--- a/sw/blas/axpy/scripts/datagen.py
+++ b/sw/blas/axpy/scripts/datagen.py
@@ -21,7 +21,7 @@ class AxpyDataGen(du.DataGen):
     def golden_model(self, a, x, y):
         return a*x + y
 
-    def validate_config(self, **kwargs):
+    def validate(self, **kwargs):
         assert kwargs['n'] % kwargs['n_tiles'] == 0, "n must be an integer multiple of n_tiles"
         n_per_tile = kwargs['n'] // kwargs['n_tiles']
         assert (n_per_tile % 8) == 0, "n must be an integer multiple of the number of cores"
@@ -36,7 +36,7 @@ def validate_config(self, **kwargs):
     def emit_header(self, **kwargs):
         header = [super().emit_header()]
 
-        self.validate_config(**kwargs)
+        self.validate(**kwargs)
 
         a = du.generate_random_array(1)[0]
         x = du.generate_random_array(kwargs['n'])
diff --git a/sw/blas/gemm/scripts/datagen.py b/sw/blas/gemm/scripts/datagen.py
index bead5121d5..c6507469e0 100755
--- a/sw/blas/gemm/scripts/datagen.py
+++ b/sw/blas/gemm/scripts/datagen.py
@@ -42,7 +42,7 @@ def infer_implementation(self, gemm_fp):
         prec, impl = re.search(r'gemm_fp(\d+)_(\w+)', gemm_fp).group(1, 2)
         return (int(prec) / 8), impl
 
-    def validate_config(self, gemm_fp, parallelize_m,
+    def validate(self, gemm_fp, parallelize_m,
                         parallelize_k, m_tiles, n_tiles, k_tiles, transa, transb,
                         M, N, K, beta, **kwargs):
         frac_m = M / m_tiles
@@ -90,7 +90,7 @@ def emit_header(self, **kwargs):
         header = [super().emit_header()]
 
         # Validate parameters
-        self.validate_config(**kwargs)
+        self.validate(**kwargs)
 
         M, N, K = kwargs['M'], kwargs['N'], kwargs['K']
 
diff --git a/sw/dnn/flashattention_2/scripts/datagen.py b/sw/dnn/flashattention_2/scripts/datagen.py
index f54ff7a08f..109f10e82a 100755
--- a/sw/dnn/flashattention_2/scripts/datagen.py
+++ b/sw/dnn/flashattention_2/scripts/datagen.py
@@ -138,7 +138,7 @@ def exact_flexfloat_golden_model(Q, K, V, B_r, B_c, desc):
 
 
 # Verify layer parameters are valid
-def validate_config(L, S, d, B_r, B_c, dtype, baseline, gemm_impl):
+def validate(L, S, d, B_r, B_c, dtype, baseline, gemm_impl):
     assert (L % B_r) == 0, 'L is not an integer multiple of B_r'
     assert (S % B_c) == 0, 'S is not an integer multiple of B_c'
     assert dtype != 'FP64', 'FP64 precision is not supported yet'
@@ -164,20 +164,20 @@ def validate_config(L, S, d, B_r, B_c, dtype, baseline, gemm_impl):
     data_utils.validate_tcdm_footprint(total_size)
 
     # Q*K^t
-    gemm.GemmDataGen().validate_config(
+    gemm.GemmDataGen().validate(
         gemm_fp=gemm_impl, parallelize_m=0, parallelize_k=0, m_tiles=1, n_tiles=1,
         k_tiles=1, transa=0, transb=1, M=B_r, N=B_c, K=d, beta=0
     )
 
     # P*V
     if baseline:
-        gemm.GemmDataGen().validate_config(
+        gemm.GemmDataGen().validate(
             gemm_fp=gemm_impl, parallelize_m=0, parallelize_k=0, m_tiles=1, n_tiles=1,
             k_tiles=1, transa=0, transb=0, M=B_r, N=d, K=B_c, beta=1
         )
     else:
         # P*(V^t)^t
-        gemm.GemmDataGen().validate_config(
+        gemm.GemmDataGen().validate(
             gemm_fp=gemm_impl, parallelize_m=0, parallelize_k=0, m_tiles=1, n_tiles=1,
             k_tiles=1, transa=0, transb=1, M=B_r, N=d, K=B_c, beta=1
         )
@@ -204,7 +204,7 @@ def emit_header(section, params):
     prec = params['dtype']
     gemm_impl = get_gemm_implementation(params)
 
-    validate_config(gemm_impl=gemm_impl, **params)
+    validate(gemm_impl=gemm_impl, **params)
 
     # torch_type = data_utils.torch_type_from_precision_t(prec)
     ff_desc = data_utils.ff_desc_from_precision_t(prec)
diff --git a/sw/dnn/layernorm/scripts/datagen.py b/sw/dnn/layernorm/scripts/datagen.py
index 37f1fb3b5f..4868546d14 100755
--- a/sw/dnn/layernorm/scripts/datagen.py
+++ b/sw/dnn/layernorm/scripts/datagen.py
@@ -46,7 +46,7 @@ def golden_model_torch(ifmap, eps, shape):
     return ln(ifmap)
 
 
-def validate_config(**kwargs):
+def validate(**kwargs):
     # Aliases
     batch_size = kwargs['input_dim']['batch_size']
     seq_len = kwargs['input_dim']['seq_len']
@@ -74,7 +74,7 @@ def validate_config(**kwargs):
 
 def emit_header(**kwargs):
     # Validate parameters
-    validate_config(**kwargs)
+    validate(**kwargs)
 
     batch_size = kwargs['input_dim']['batch_size']
     seq_len = kwargs['input_dim']['seq_len']