llama : add support for lora adapters in T5 model (ggerganov#8938)
Co-authored-by: Stanisław Szymczyk <[email protected]>
fairydreaming and sszymczy authored Aug 9, 2024
1 parent 272e3bd commit 6afd1a9
Showing 1 changed file with 13 additions and 13 deletions.
src/llama.cpp (13 additions, 13 deletions)
@@ -13167,13 +13167,13 @@ struct llm_build_context {

// self-attention
{
-struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq_enc, cur);
+struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_enc, cur);
cb(Qcur, "Qcur", il);

-struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk_enc, cur);
+struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_enc, cur);
cb(Kcur, "Kcur", il);

-struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv_enc, cur);
+struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_enc, cur);
cb(Vcur, "Vcur", il);

Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -13207,7 +13207,7 @@ struct llm_build_context {

ggml_build_forward_expand(gf, cur);

-cur = ggml_mul_mat(ctx0, model.layers[il].wo_enc, cur);
+cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_enc, cur);
cb(cur, "kqv_out", il);
}

@@ -13281,13 +13281,13 @@ struct llm_build_context {

// self-attention
{
-struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
cb(Qcur, "Qcur", il);

-struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
cb(Kcur, "Kcur", il);

-struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
cb(Vcur, "Vcur", il);

llm_build_kv_store(ctx0, hparams, cparams, kv_self, gf, Kcur, Vcur, n_tokens, kv_head, cb, il);
@@ -13334,7 +13334,7 @@ struct llm_build_context {

ggml_build_forward_expand(gf, cur);

-cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
+cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
cb(cur, "kqv_out", il);
}

@@ -13351,13 +13351,13 @@ struct llm_build_context {

// cross-attention
{
-struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq_cross, cur);
+struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_cross, cur);
cb(Qcur, "Qcur", il);

-struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk_cross, embd_enc);
+struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_cross, embd_enc);
cb(Kcur, "Kcur", il);

-struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv_cross, embd_enc);
+struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_cross, embd_enc);
cb(Vcur, "Vcur", il);

Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -13386,7 +13386,7 @@ struct llm_build_context {

ggml_build_forward_expand(gf, cur);

-cur = ggml_mul_mat(ctx0, model.layers[il].wo_cross, cur);
+cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_cross, cur);
cb(cur, "kqv_out", il);
}

@@ -13443,7 +13443,7 @@ struct llm_build_context {
cb(cur, "result_norm", -1);

// lm_head
-cur = ggml_mul_mat(ctx0, model.output, cur);
+cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
cb(cur, "result_output", -1);
}

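Every changed call site routes the T5 encoder/decoder projections through llm_build_lora_mm instead of calling ggml_mul_mat directly. The helper's body is not shown in this diff, but the idea behind such a helper is to compute the base matmul and then add the scaled low-rank LoRA update for any attached adapter: y = W*x + scale * B*(A*x). Below is a minimal sketch of that technique under stated assumptions; the lora_weight struct and lora_mm_sketch name are hypothetical stand-ins for the per-weight adapter bookkeeping that the real llama_context carries, not the actual llama.cpp signature.

#include "ggml.h"
#include <stddef.h>

// Hypothetical adapter record: low-rank factors A and B plus a blend scale.
struct lora_weight {
    struct ggml_tensor * a;  // down-projection to rank r
    struct ggml_tensor * b;  // up-projection back to the output dimension
    float scale;             // typically adapter_scale * alpha / r
};

// LoRA-aware matmul sketch: y = W*x + scale * B*(A*x).
// With no adapter attached it reduces to a plain ggml_mul_mat, which is
// why every call site in the diff above is a drop-in substitution.
static struct ggml_tensor * lora_mm_sketch(
        struct ggml_context * ctx0,
        struct ggml_tensor  * w,
        struct ggml_tensor  * cur,
        const struct lora_weight * lora /* may be NULL */) {
    struct ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
    if (lora != NULL) {
        // Project down with A, back up with B, then scale the update.
        struct ggml_tensor * ab = ggml_mul_mat(ctx0, lora->b,
                                               ggml_mul_mat(ctx0, lora->a, cur));
        res = ggml_add(ctx0, res, ggml_scale(ctx0, ab, lora->scale));
    }
    return res;
}

Because the helper degenerates to the original matmul when no adapter is loaded, the T5 graph builds exactly as before unless a LoRA adapter is actually attached.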
