From e374227221a5c72ce6fd12bf6bc9db8c72101546 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sat, 28 Oct 2023 12:20:08 +0300
Subject: [PATCH] Revert "cuda : use CUBLAS_COMPUTE_16F for non-attention ops"

This reverts commit 0f2498f25d7e278f075d060e8e77e68dacf4e90c.
---
 ggml-cuda.cu | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index d16b8f9c5517c..e03e500d7ae93 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6385,11 +6385,8 @@ inline void ggml_cuda_op_mul_mat_cublas(
         }
         const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16;
 
-        size_t dst_as = 0;
-        half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as);
-
-        const half alpha = 1.0f;
-        const half beta = 0.0f;
+        const float alpha = 1.0f;
+        const float beta = 0.0f;
 
         CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
         CUBLAS_CHECK(
@@ -6397,15 +6394,10 @@ inline void ggml_cuda_op_mul_mat_cublas(
                     row_diff, src1_ncols, ne10,
                     &alpha, src0_ptr, CUDA_R_16F, ne00,
                             src1_ptr, CUDA_R_16F, ne10,
-                    &beta,  dst_f16,  CUDA_R_16F, ldc,
-                    CUBLAS_COMPUTE_16F,
+                    &beta,  dst_dd_i, CUDA_R_32F, ldc,
+                    CUBLAS_COMPUTE_32F,
                     CUBLAS_GEMM_DEFAULT_TENSOR_OP));
 
-        const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
-        to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream);
-
-        ggml_cuda_pool_free(dst_f16, dst_as);
-
         if (src0_as != 0) {
             ggml_cuda_pool_free(src0_as_f16, src0_as);
         }
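
Note (not part of the patch): below is a minimal standalone sketch contrasting the two cublasGemmEx configurations this revert switches between. The post-revert path accumulates in FP32 and writes straight into the FP32 destination; the reverted path accumulates in FP16 into a scratch FP16 buffer that ggml then converted back to FP32 on the device. The matrix sizes, buffer names, and the CHECK_CUBLAS macro are illustrative assumptions, not code from ggml-cuda.cu.

// sketch.cu -- compile with: nvcc sketch.cu -lcublas
// Assumed setup for illustration; not taken from ggml-cuda.cu.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cublas_v2.h>

#define CHECK_CUBLAS(call)                                  \
    do {                                                    \
        cublasStatus_t s_ = (call);                         \
        if (s_ != CUBLAS_STATUS_SUCCESS) {                  \
            fprintf(stderr, "cuBLAS error %d at %s:%d\n",   \
                    (int) s_, __FILE__, __LINE__);          \
            exit(1);                                        \
        }                                                   \
    } while (0)

int main() {
    const int m = 64, n = 64, k = 64; // arbitrary sizes for the sketch

    half  *A, *B;   // FP16 inputs, same in both variants
    float *C32;     // FP32 destination (post-revert path)
    half  *C16;     // FP16 scratch destination (reverted 16F path)
    cudaMalloc(&A,   m*k*sizeof(half));
    cudaMalloc(&B,   k*n*sizeof(half));
    cudaMalloc(&C32, m*n*sizeof(float));
    cudaMalloc(&C16, m*n*sizeof(half));

    cublasHandle_t handle;
    CHECK_CUBLAS(cublasCreate(&handle));

    // Post-revert path: FP32 accumulation, result lands directly in the
    // FP32 buffer -- no scratch allocation or conversion kernel needed.
    const float alpha32 = 1.0f, beta32 = 0.0f;
    CHECK_CUBLAS(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                              m, n, k,
                              &alpha32, A, CUDA_R_16F, m,
                                        B, CUDA_R_16F, k,
                              &beta32,  C32, CUDA_R_32F, m,
                              CUBLAS_COMPUTE_32F,
                              CUBLAS_GEMM_DEFAULT_TENSOR_OP));

    // Reverted path: FP16 accumulation into an FP16 buffer; the code this
    // patch removes then converted that buffer to FP32 in a separate pass.
    // Note alpha/beta must be half when the compute type is 16F.
    const half alpha16 = 1.0f, beta16 = 0.0f;
    CHECK_CUBLAS(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                              m, n, k,
                              &alpha16, A, CUDA_R_16F, m,
                                        B, CUDA_R_16F, k,
                              &beta16,  C16, CUDA_R_16F, m,
                              CUBLAS_COMPUTE_16F,
                              CUBLAS_GEMM_DEFAULT_TENSOR_OP));

    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(C32); cudaFree(C16);
    return 0;
}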