diff --git a/examples/server/public/completion.js b/examples/server/public/completion.js
index c281f0fbd5535..6e2b99565dc6e 100644
--- a/examples/server/public/completion.js
+++ b/examples/server/public/completion.js
@@ -34,7 +34,8 @@ export async function* llama(prompt, params = {}, config = {}) {
headers: {
'Connection': 'keep-alive',
'Content-Type': 'application/json',
- 'Accept': 'text/event-stream'
+ 'Accept': 'text/event-stream',
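+        // if the caller supplied an API key, send it as a Bearer token; the
+        // server-side check (see server.cpp in this patch) looks for this header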
+ ...(params.api_key ? {'Authorization': `Bearer ${params.api_key}`} : {})
},
signal: controller.signal,
});
diff --git a/examples/server/public/index.html b/examples/server/public/index.html
index 451fd4a3be602..07d779d2008a2 100644
--- a/examples/server/public/index.html
+++ b/examples/server/public/index.html
@@ -235,7 +235,8 @@
grammar: '',
n_probs: 0, // no completion_probabilities,
image_data: [],
- cache_prompt: true
+ cache_prompt: true,
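+            // when set, completion.js forwards this as "Authorization: Bearer <key>"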
+ api_key: ''
})
/* START: Support for storing prompt templates and parameters in browsers LocalStorage */
@@ -790,6 +791,10 @@
+
`
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 39d1e83d18575..5f93dcb66a4e2 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -36,6 +36,7 @@ using json = nlohmann::json;
struct server_params
{
std::string hostname = "127.0.0.1";
+ std::string api_key;
std::string public_path = "examples/server/public";
int32_t port = 8080;
int32_t read_timeout = 600;
@@ -1953,6 +1954,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
printf(" --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
printf(" --port PORT port to listen (default (default: %d)\n", sparams.port);
printf(" --path PUBLIC_PATH path from which to serve static files (default %s)\n", sparams.public_path.c_str());
+ printf(" --api-key API_KEY optional api key to enhance server security. If set, requests must include this key for access.\n");
printf(" -to N, --timeout N server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
@@ -2002,6 +2004,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
}
sparams.public_path = argv[i];
}
+ else if (arg == "--api-key")
+ {
+ if (++i >= argc)
+ {
+ invalid_param = true;
+ break;
+ }
+ sparams.api_key = argv[i];
+ }
else if (arg == "--timeout" || arg == "-to")
{
if (++i >= argc)
@@ -2669,6 +2680,32 @@ int main(int argc, char **argv)
httplib::Server svr;
+ // Middleware for API key validation
+ auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
+ // If API key is not set, skip validation
+ if (sparams.api_key.empty()) {
+ return true;
+ }
+
+ // Check for API key in the header
+ auto auth_header = req.get_header_value("Authorization");
+ std::string prefix = "Bearer ";
+ if (auth_header.substr(0, prefix.size()) == prefix) {
+ std::string received_api_key = auth_header.substr(prefix.size());
+ if (received_api_key == sparams.api_key) {
+ return true; // API key is valid
+ }
+ }
+
+ // API key is invalid or not provided
+ res.set_content("Unauthorized: Invalid API Key", "text/plain");
+ res.status = 401; // Unauthorized
+
+ LOG_WARNING("Unauthorized: Invalid API Key", {});
+
+ return false;
+ };
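+    // example of an authorized request (hypothetical key value shown):
+    //   curl http://127.0.0.1:8080/completion \
+    //       -H "Content-Type: application/json" \
+    //       -H "Authorization: Bearer sk-mykey" \
+    //       -d '{"prompt": "Hello", "n_predict": 8}'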
+
svr.set_default_headers({{"Server", "llama.cpp"},
{"Access-Control-Allow-Origin", "*"},
{"Access-Control-Allow-Headers", "content-type"}});
@@ -2711,8 +2748,11 @@ int main(int argc, char **argv)
res.set_content(data.dump(), "application/json");
});
- svr.Post("/completion", [&llama](const httplib::Request &req, httplib::Response &res)
+ svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
{
+ if (!validate_api_key(req, res)) {
+ return;
+ }
json data = json::parse(req.body);
const int task_id = llama.request_completion(data, false, false, -1);
if (!json_value(data, "stream", false)) {
@@ -2799,8 +2839,11 @@ int main(int argc, char **argv)
});
// TODO: add mount point without "/v1" prefix -- how?
- svr.Post("/v1/chat/completions", [&llama](const httplib::Request &req, httplib::Response &res)
+ svr.Post("/v1/chat/completions", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
{
+ if (!validate_api_key(req, res)) {
+ return;
+ }
json data = oaicompat_completion_params_parse(json::parse(req.body));
const int task_id = llama.request_completion(data, false, false, -1);
@@ -2869,8 +2912,11 @@ int main(int argc, char **argv)
}
});
- svr.Post("/infill", [&llama](const httplib::Request &req, httplib::Response &res)
+ svr.Post("/infill", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
{
+ if (!validate_api_key(req, res)) {
+ return;
+ }
json data = json::parse(req.body);
const int task_id = llama.request_completion(data, true, false, -1);
if (!json_value(data, "stream", false)) {
@@ -3005,11 +3051,15 @@ int main(int argc, char **argv)
svr.set_error_handler([](const httplib::Request &, httplib::Response &res)
{
+ if (res.status == 401)
+ {
+ res.set_content("Unauthorized", "text/plain");
+ }
if (res.status == 400)
{
res.set_content("Invalid request", "text/plain");
}
- else if (res.status != 500)
+ else if (res.status == 404)
{
res.set_content("File Not Found", "text/plain");
res.status = 404;
@@ -3032,11 +3082,15 @@ int main(int argc, char **argv)
// to make it ctrl+clickable:
LOG_TEE("\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);
- LOG_INFO("HTTP server listening", {
- {"hostname", sparams.hostname},
- {"port", sparams.port},
- });
+    std::unordered_map<std::string, std::string> log_data;
+ log_data["hostname"] = sparams.hostname;
+ log_data["port"] = std::to_string(sparams.port);
+
+    if (!sparams.api_key.empty()) {
+        // guard: substr would throw std::out_of_range for keys shorter than 4 chars
+        log_data["api_key"] = "api_key: ****" + (sparams.api_key.size() > 4 ? sparams.api_key.substr(sparams.api_key.size() - 4) : std::string(""));
+    }
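+    // e.g. running with --api-key hunter2abcd logs: api_key: ****abcd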
+ LOG_INFO("HTTP server listening", log_data);
// run the HTTP server in a thread - see comment below
std::thread t([&]()
{
diff --git a/ggml.c b/ggml.c
index 1feb7ead33ef8..ad546a7314a14 100644
--- a/ggml.c
+++ b/ggml.c
@@ -9580,16 +9580,11 @@ static bool ggml_compute_forward_mul_mat_use_blas(
}
#endif
-// off1 = offset in i11 and i1
-// cne1 = ne11 and ne1
-// in a normal matrix multiplication, off1 = 0 and cne1 = ne1
-// during GGML_TASK_INIT, the full src1 is converted regardless of off1 and cne1
static void ggml_compute_forward_mul_mat(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
- struct ggml_tensor * dst,
- int64_t off1, int64_t cne1) {
+ struct ggml_tensor * dst) {
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
@@ -9657,9 +9652,9 @@ static void ggml_compute_forward_mul_mat(
const int64_t i03 = i13/r3;
const int64_t i02 = i12/r2;
- const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
- const float * y = (float *) ((char *) src1->data + off1*nb11 + i12*nb12 + i13*nb13);
- float * d = (float *) ((char *) dst->data + off1*nb1 + i12*nb2 + i13*nb3);
+ const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
+ const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
+ float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
if (type != GGML_TYPE_F32) {
float * const wdata = params->wdata;
@@ -9676,7 +9671,7 @@ static void ggml_compute_forward_mul_mat(
}
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
- cne1, ne01, ne10,
+ ne1, ne01, ne10,
1.0f, y, ne10,
x, ne00,
0.0f, d, ne01);
@@ -9717,8 +9712,8 @@ static void ggml_compute_forward_mul_mat(
const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
const size_t row_size = ggml_row_size(vec_dot_type, ne10);
- const int64_t nr0 = ne01; // src0 rows
- const int64_t nr1 = cne1*ne12*ne13; // src1 rows
+ const int64_t nr0 = ne01; // src0 rows
+ const int64_t nr1 = ne1*ne12*ne13; // src1 rows
//printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
@@ -9760,9 +9755,9 @@ static void ggml_compute_forward_mul_mat(
for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
- const int64_t i13 = (ir1/(ne12*cne1));
- const int64_t i12 = (ir1 - i13*ne12*cne1)/cne1;
- const int64_t i11 = (ir1 - i13*ne12*cne1 - i12*cne1) + off1;
+ const int64_t i13 = (ir1/(ne12*ne1));
+ const int64_t i12 = (ir1 - i13*ne12*ne1)/ne1;
+ const int64_t i11 = (ir1 - i13*ne12*ne1 - i12*ne1);
// broadcast src0 into src1
const int64_t i03 = i13/r3;
@@ -9802,28 +9797,191 @@ static void ggml_compute_forward_mul_mat(
static void ggml_compute_forward_mul_mat_id(
const struct ggml_compute_params * params,
- const struct ggml_tensor * src0,
+ const struct ggml_tensor * ids,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
- // during GGML_TASK_INIT the entire src1 is converted to vec_dot_type
- ggml_compute_forward_mul_mat(params, dst->src[2], src1, dst, 0, dst->ne[1]);
- return;
- }
+ const struct ggml_tensor * src0 = dst->src[2]; // only for GGML_TENSOR_BINARY_OP_LOCALS
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const enum ggml_type type = src0->type;
+
+ const bool src1_cont = ggml_is_contiguous(src1);
+
+ ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
+ enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
+ ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
+
+ GGML_ASSERT(ne0 == ne01);
+ GGML_ASSERT(ne1 == ne11);
+ GGML_ASSERT(ne2 == ne12);
+ GGML_ASSERT(ne3 == ne13);
+
+ // we don't support permuted src0 or src1
+ GGML_ASSERT(nb00 == ggml_type_size(type));
+ GGML_ASSERT(nb10 == ggml_type_size(src1->type));
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 == sizeof(float));
+ GGML_ASSERT(nb0 <= nb1);
+ GGML_ASSERT(nb1 <= nb2);
+ GGML_ASSERT(nb2 <= nb3);
- const struct ggml_tensor * ids = src0;
+ // broadcast factors
+ const int64_t r2 = ne12/ne02;
+ const int64_t r3 = ne13/ne03;
+
+ // row groups
const int id = ggml_get_op_params_i32(dst, 0);
const int n_as = ggml_get_op_params_i32(dst, 1);
- for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) {
- const int32_t row_id = *(const int32_t *) ((const char *) ids->data + i01*ids->nb[1] + id*ids->nb[0]);
+ char * wdata_src1_end = (src1->type == vec_dot_type) ?
+ (char *) params->wdata :
+ (char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
+
+ int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
+ int64_t * matrix_rows = matrix_row_counts + n_as; // [n_as][ne11]
+
+ #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne11 + (i1)]
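+    // e.g. with n_as = 8 experts and ne11 = 4 src1 rows, matrix_rows is an 8x4
+    // table: MMID_MATRIX_ROW(2, 0) is the index of the first src1 row routed to
+    // expert 2, and matrix_row_counts[2] says how many such rows there are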
- GGML_ASSERT(row_id >= 0 && row_id < n_as);
+ if (params->type == GGML_TASK_INIT) {
+ char * wdata = params->wdata;
+ if (src1->type != vec_dot_type) {
+ const size_t row_size = ggml_row_size(vec_dot_type, ne10);
- const struct ggml_tensor * src0_row = dst->src[row_id + 2];
- ggml_compute_forward_mul_mat(params, src0_row, src1, dst, i01, 1);
+ assert(params->wsize >= ne11*ne12*ne13*row_size);
+ assert(src1->type == GGML_TYPE_F32);
+
+ for (int64_t i13 = 0; i13 < ne13; ++i13) {
+ for (int64_t i12 = 0; i12 < ne12; ++i12) {
+ for (int64_t i11 = 0; i11 < ne11; ++i11) {
+ from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
+ wdata += row_size;
+ }
+ }
+ }
+ }
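+        // if a conversion was needed, wdata now holds every src1 row in
+        // vec_dot_type (e.g. q8_0 when src0 is q4_0), packed at row_size strides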
+
+ // initialize matrix_row_counts
+ GGML_ASSERT(wdata == wdata_src1_end);
+ memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
+
+ // group rows by src0 matrix
+ for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) {
+ const int32_t row_id = *(const int32_t *) ((const char *) ids->data + i01*ids->nb[1] + id*ids->nb[0]);
+
+ GGML_ASSERT(row_id >= 0 && row_id < n_as);
+ MMID_MATRIX_ROW(row_id, matrix_row_counts[row_id]) = i01;
+ matrix_row_counts[row_id] += 1;
+ }
+
+ return;
}
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // compute each matrix multiplication in sequence
+ for (int cur_a = 0; cur_a < n_as; ++cur_a) {
+ const int64_t cne1 = matrix_row_counts[cur_a];
+
+ if (cne1 == 0) {
+ continue;
+ }
+
+ const struct ggml_tensor * src0_cur = dst->src[cur_a + 2];
+
+ const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
+ const size_t row_size = ggml_row_size(vec_dot_type, ne10);
+
+ const int64_t nr0 = ne01; // src0 rows
+ const int64_t nr1 = cne1*ne12*ne13; // src1 rows
+
+ //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
+
+ // distribute the thread work across the inner or outer loop based on which one is larger
+
+ const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
+ const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
+
+ const int64_t ith0 = ith % nth0;
+ const int64_t ith1 = ith / nth0;
+
+ const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
+ const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
+
+ const int64_t ir010 = dr0*ith0;
+ const int64_t ir011 = MIN(ir010 + dr0, nr0);
+
+ const int64_t ir110 = dr1*ith1;
+ const int64_t ir111 = MIN(ir110 + dr1, nr1);
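+        // e.g. nr0 = 4096 src0 rows, nr1 = 2 routed rows, nth = 8 threads:
+        // nth0 = 8, nth1 = 1, dr0 = 512, so each thread scans its own
+        // 512-row slice of src0 against both routed src1 rows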
+
+ //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
+
+ // threads with no work simply yield (not sure if it helps)
+ if (ir010 >= ir011 || ir110 >= ir111) {
+ sched_yield();
+ continue;
+ }
+
+ assert(ne12 % ne02 == 0);
+ assert(ne13 % ne03 == 0);
+
+ // block-tiling attempt
+ const int64_t blck_0 = 16;
+ const int64_t blck_1 = 16;
+
+ // attempt to reduce false-sharing (does not seem to make a difference)
+ float tmp[16];
+
+ for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
+ for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
+ for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
+ const int64_t i13 = (ir1/(ne12*cne1)); // Note: currently, src1 is always a matrix
+ const int64_t i12 = (ir1 - i13*ne12*cne1)/cne1;
+ const int64_t _i11 = (ir1 - i13*ne12*cne1 - i12*cne1);
+ const int64_t i11 = MMID_MATRIX_ROW(cur_a, _i11);
+
+ // broadcast src0 into src1
+ const int64_t i03 = i13/r3;
+ const int64_t i02 = i12/r2;
+
+ const int64_t i1 = i11;
+ const int64_t i2 = i12;
+ const int64_t i3 = i13;
+
+ const char * src0_row = (const char *) src0_cur->data + (0 + i02*nb02 + i03*nb03);
+
+ // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
+ // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
+ // the original src1 data pointer, so we should index using the indices directly
+ // TODO: this is a bit of a hack, we should probably have a better way to handle this
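+                    // e.g. for contiguous f32 src1 converted during GGML_TASK_INIT,
+                    // row (i11, i12, i13) sits at flat offset
+                    // (i11 + i12*ne11 + i13*ne12*ne11)*row_size inside wdata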
+ const char * src1_col = (const char *) wdata +
+ (src1_cont || src1->type != vec_dot_type
+ ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
+ : (i11*nb11 + i12*nb12 + i13*nb13));
+
+ float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
+
+ //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
+ // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
+ //}
+
+ for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
+ vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
+ }
+ memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
+ }
+ }
+ }
+ }
+
+ #undef MMID_MATRIX_ROW
}
// ggml_compute_forward_out_prod
@@ -14191,7 +14349,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
} break;
case GGML_OP_MUL_MAT:
{
- ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor, 0, tensor->ne[1]);
+ ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_MUL_MAT_ID:
{
@@ -15991,7 +16149,6 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
} break;
case GGML_OP_MUL_MAT_ID:
{
- // FIXME: blas
n_tasks = n_threads;
} break;
case GGML_OP_OUT_PROD:
@@ -16325,20 +16482,16 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
} break;
case GGML_OP_MUL_MAT_ID:
{
- const struct ggml_tensor * a = node->src[2];
- const struct ggml_tensor * b = node->src[1];
- const enum ggml_type vec_dot_type = type_traits[a->type].vec_dot_type;
-#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
- if (ggml_compute_forward_mul_mat_use_blas(a, b, node)) {
- if (a->type != GGML_TYPE_F32) {
- // here we need memory just for single 2D matrix from src0
- cur = ggml_type_size(GGML_TYPE_F32)*(a->ne[0]*a->ne[1]);
- }
- } else
-#endif
- if (b->type != vec_dot_type) {
- cur = ggml_row_size(vec_dot_type, ggml_nelements(b));
+ const struct ggml_tensor * src0 = node->src[2];
+ const struct ggml_tensor * src1 = node->src[1];
+ const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
+ if (src1->type != vec_dot_type) {
+ cur = ggml_row_size(vec_dot_type, ggml_nelements(src1));
}
+ const int n_as = ggml_get_op_params_i32(node, 1);
+ cur = GGML_PAD(cur, sizeof(int64_t)); // align
+ cur += n_as * sizeof(int64_t); // matrix_row_counts
+ cur += n_as * src1->ne[1] * sizeof(int64_t); // matrix_rows
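+                        // resulting work-buffer layout (sketch):
+                        //   [src1 in vec_dot_type][pad][matrix_row_counts: n_as][matrix_rows: n_as*ne11]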
} break;
case GGML_OP_OUT_PROD:
{