kompute : fix fallback to CPU (ggerganov#5201)
cebtenzzre authored Jan 29, 2024
1 parent fbf1dde commit 6daa69e
Showing 1 changed file with 2 additions and 2 deletions.
diff --git a/llama.cpp b/llama.cpp
--- a/llama.cpp
+++ b/llama.cpp
@@ -4136,7 +4136,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
     }
 
 #ifdef GGML_USE_KOMPUTE
-    if (ggml_vk_has_device() && params.n_gpu_layers > 0 && (
+    if (params.n_gpu_layers > 0 && (
         !(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
         || !(
             model.ftype == LLAMA_FTYPE_ALL_F32 ||
@@ -4145,8 +4145,8 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
             model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
         )
     )) {
-        // disable Vulkan due to unsupported model architecture or quantization type
+        // TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
         LLAMA_LOG_WARN("%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
         params.n_gpu_layers = 0;
     }
 #endif
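The change drops the ggml_vk_has_device() guard, so the unsupported-model check now runs whether or not a Kompute device has been initialized: when the architecture or quantization type is not one the Kompute backend handles, n_gpu_layers is reset to 0 and loading falls back to the CPU, which is presumably what the "fix fallback to CPU" title refers to. Below is a minimal, self-contained sketch of the resulting control flow; the enum values, the model_info struct, the effective_gpu_layers helper, and the plain fprintf are stand-ins invented for illustration (the real definitions live in llama.cpp), and only the two ftypes visible in this diff are spelled out.

// Sketch only: stand-in types and a plain fprintf in place of llama.cpp's
// internal enums and LLAMA_LOG_WARN. Only the control flow mirrors the
// patched code above.
#include <cstdio>

enum llm_arch    { LLM_ARCH_LLAMA, LLM_ARCH_FALCON, LLM_ARCH_OTHER };
enum llama_ftype {
    LLAMA_FTYPE_ALL_F32,
    LLAMA_FTYPE_MOSTLY_Q4_1,
    LLAMA_FTYPE_MOSTLY_Q8_0, // an example of a type the check rejects
};

struct model_info {
    llm_arch    arch;
    llama_ftype ftype;
};

// Returns the effective layer count: unchanged when the model looks usable
// by the Kompute backend, 0 (full CPU fallback) otherwise. Note the absence
// of any device check -- after this commit the fallback no longer depends on
// a Kompute device having been initialized.
static int effective_gpu_layers(const model_info & model, int n_gpu_layers) {
    if (n_gpu_layers > 0 && (
        !(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
        || !(
            model.ftype == LLAMA_FTYPE_ALL_F32 ||
            model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 // llama.cpp accepts a few more ftypes here
        )
    )) {
        fprintf(stderr, "%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
        return 0;
    }
    return n_gpu_layers;
}

int main() {
    model_info rejected = { LLM_ARCH_LLAMA,  LLAMA_FTYPE_MOSTLY_Q8_0 };
    model_info accepted = { LLM_ARCH_FALCON, LLAMA_FTYPE_MOSTLY_Q4_1 };
    printf("Q8_0 llama : n_gpu_layers = %d\n", effective_gpu_layers(rejected, 32)); // -> 0
    printf("Q4_1 falcon: n_gpu_layers = %d\n", effective_gpu_layers(accepted, 32)); // -> 32
    return 0;
}

With the old ggml_vk_has_device() prefix, the same check would have been skipped whenever no device was present yet, leaving a nonzero layer count for a backend that could not run the model.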
