From 92515f78bcd9b14cdf0b05bad149acc95e31565e Mon Sep 17 00:00:00 2001 From: Nischay Yadav Date: Mon, 14 Oct 2024 11:48:24 -0700 Subject: [PATCH] Added code for internal tls Signed-off-by: Nischay Yadav --- configs/cert1/ca.key | 28 + configs/cert1/ca.pem | 22 + configs/cert1/client.csr | 20 + configs/cert1/client.key | 28 + configs/cert1/client.pem | 24 + configs/cert1/gen.sh | 22 + configs/cert1/openssl.cnf | 213 ++++ configs/cert1/server.csr | 20 + configs/cert1/server.key | 28 + configs/cert1/server.pem | 24 + configs/docker-compose.yml | 175 +++ configs/milvus.yaml | 89 +- configs/milvus_internaltls.yaml | 1053 +++++++++++++++++ .../distributed/datacoord/client/client.go | 11 +- .../datacoord/client/client_test.go | 87 +- internal/distributed/datacoord/service.go | 9 +- .../distributed/datanode/client/client.go | 11 +- .../datanode/client/client_test.go | 91 ++ internal/distributed/datanode/service.go | 10 +- .../distributed/indexnode/client/client.go | 10 + .../indexnode/client/client_test.go | 91 ++ internal/distributed/indexnode/service.go | 12 +- internal/distributed/proxy/client/client.go | 10 + .../distributed/proxy/client/client_test.go | 88 ++ internal/distributed/proxy/service.go | 7 +- .../distributed/querycoord/client/client.go | 11 +- .../querycoord/client/client_test.go | 90 ++ internal/distributed/querycoord/service.go | 10 +- .../distributed/querynode/client/client.go | 13 +- .../querynode/client/client_test.go | 96 +- internal/distributed/querynode/service.go | 11 +- .../distributed/rootcoord/client/client.go | 11 +- .../rootcoord/client/client_test.go | 97 +- internal/distributed/rootcoord/service.go | 9 +- internal/distributed/utils/util.go | 69 ++ internal/mocks/mock_grpc_client.go | 35 + internal/util/grpcclient/client.go | 16 +- internal/util/mock/grpcclient.go | 6 + pkg/util/paramtable/grpc_param.go | 71 +- pkg/util/paramtable/grpc_param_test.go | 11 + 40 files changed, 2629 insertions(+), 110 deletions(-) create mode 100644 configs/cert1/ca.key create mode 100644 configs/cert1/ca.pem create mode 100644 configs/cert1/client.csr create mode 100644 configs/cert1/client.key create mode 100644 configs/cert1/client.pem create mode 100755 configs/cert1/gen.sh create mode 100644 configs/cert1/openssl.cnf create mode 100644 configs/cert1/server.csr create mode 100644 configs/cert1/server.key create mode 100644 configs/cert1/server.pem create mode 100644 configs/docker-compose.yml create mode 100644 configs/milvus_internaltls.yaml diff --git a/configs/cert1/ca.key b/configs/cert1/ca.key new file mode 100644 index 0000000000000..3f135771e87fe --- /dev/null +++ b/configs/cert1/ca.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDB16f4wWQh21Vl +u1LUFIeMyGZVjG0ya1+TVS2h1C/3nW82Ed+/eundfVN5LLMaRJ22Mu94njE9kz0g ++20bVghPC1P5lNJd4qivaq4FZkEe09sGVYSAOrm5etnCJQHoYJH7Epuusazb80a0 +ABzK5xKvYwi7qzqUss0GH4MvbF5oh3sMvsDMX1+saVRPhm5Nsl8gVukfOpCURdNs +fj9or69IETmlM3RvLdqSjy87syQ7PmX9iKZj6pxvvFUPrXABNvcfw6qDvzXnFz3D +HgWhNzz5MX5iL39b+mRnr0oyO/UxhcjmO8LZBMmOzZR3yh4qoEDaaX7L7o1AFiOQ +hBmsdtXjAgMBAAECggEACnjQkb883hlfzS2oBV4Qjpn68U8zTQCA+tAktXPlnX5r +ViP0Dv+gZ79QBaFVEau4S3IH/t+o8Fcme8QpeksVCtQlVrvj+391tnhQa2lBRl2S +nQ58qILDqtIItpTSPBAPgt1X/MGLgZxsaugApjOCjmgOvji7YvIf4l9McoDrvsDO +4s1G5l8dL0LI+F9f0rHuJAqU96FsU8c38/EuWC95zUiJB96pr4YfRhkImQKU7l6D +6qJ/mmOhf0oLHZtaJUcZWERmZ2mEwiiT4077A64+dID/tX5jxYtXW4RrJyPQFpFi +y8hN/rQxJzVkjlJprfUX2YnawQcPqopC+3mdYvu2/QKBgQDewZ+nuPj1eDYFRMs2 +fYrwBRymr3tFv8ld/Opbmcf9qYOKZvRnLBZcUQ1oaHTTvM3LWrC9Slc9tC3ZWDEj
+njPvyocxUG9Aui4CXSEoZ2TDBSyZ+zY3fxTAKDQTp39kAROnogU/Br8r5Ayw6LIU +w9r8wA1HLWCKWCFvvhNiuQZ21QKBgQDexWEiksQxIJJNZ1SjOAopcQC0A58L+z0s +k2VN0ZfnpkQN3NX8p4C2mkbVP0GACeweVLkEw+A9Xn7fPD3zxCMyLxz4wxlIK4yR +fLOO+pufUUwicQRljKEHtyDIKY9ANwcN6ZHjzqSi89oIRmRtl03wSmSVaLMy6Pkv +DQBTIAtl1wKBgQCMa6BaWYdPLk1XeutjWTaZYY+TH/HkoSauQf32anWg2RgbyhTt +aORbo5iBJb/nrhBIzc22NcPTGN8CMM8hNdxln9UjaCIJupPAV7bMQolJSty34J1n +1ItfKrjRXvg2MVuTg9YsyfL/Fw5NAxMndeN5CxQTt24xJ6dESpzr0dlaVQKBgEQV +01A7xxa0u8JIeIfCEukDjpF2IeAJk3LSQb389w5ntJkAC67CiwZENK3sPVqirbW/ +cyTPsPNQtDoKMW0VXyRxXctx5YI3PSkiEcb7NSEGOdy06gEXcAfpom1KtmuL5e4M +mvgjKhJbsSGiUgNkoP913TQE4fAHYFFPCfBd9N3VAoGBAIYYlJwuOK6bY0aGmuM6 +dCbrB0r6mbRE0+PoegMORoU/VENnHGBQqoxf4FaCM9SJ4i++7IeHbTIbYOkW1frA +C30PyM2Nj4LvuAl8UwVUkLK0Xijbp+XITrcOyZ2ZHYQIAuQti++RGvBdjpLBYWyA +wSaHTh5gLbv9ZdXYH2oE55nf +-----END PRIVATE KEY----- diff --git a/configs/cert1/ca.pem b/configs/cert1/ca.pem new file mode 100644 index 0000000000000..6aa7f46276d00 --- /dev/null +++ b/configs/cert1/ca.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDszCCApugAwIBAgIUa/W9LQD62MtJa0+S6cQ9Yd27OXgwDQYJKoZIhvcNAQEL +BQAwaTELMAkGA1UEBhMCQ04xETAPBgNVBAgMCFNoYW5naGFpMREwDwYDVQQHDAhT +aGFuZ2hhaTEPMA0GA1UECgwGbWlsdnVzMQ8wDQYDVQQLDAZtaWx2dXMxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0yNDA5MDMxNDMxMjZaFw0zNDA5MDExNDMxMjZaMGkx +CzAJBgNVBAYTAkNOMREwDwYDVQQIDAhTaGFuZ2hhaTERMA8GA1UEBwwIU2hhbmdo +YWkxDzANBgNVBAoMBm1pbHZ1czEPMA0GA1UECwwGbWlsdnVzMRIwEAYDVQQDDAls +b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB16f4wWQh +21Vlu1LUFIeMyGZVjG0ya1+TVS2h1C/3nW82Ed+/eundfVN5LLMaRJ22Mu94njE9 +kz0g+20bVghPC1P5lNJd4qivaq4FZkEe09sGVYSAOrm5etnCJQHoYJH7Epuusazb +80a0ABzK5xKvYwi7qzqUss0GH4MvbF5oh3sMvsDMX1+saVRPhm5Nsl8gVukfOpCU +RdNsfj9or69IETmlM3RvLdqSjy87syQ7PmX9iKZj6pxvvFUPrXABNvcfw6qDvzXn +Fz3DHgWhNzz5MX5iL39b+mRnr0oyO/UxhcjmO8LZBMmOzZR3yh4qoEDaaX7L7o1A +FiOQhBmsdtXjAgMBAAGjUzBRMB0GA1UdDgQWBBT2JD+0bHwTqhNXYZnClGC74QPt +kzAfBgNVHSMEGDAWgBT2JD+0bHwTqhNXYZnClGC74QPtkzAPBgNVHRMBAf8EBTAD +AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCEKmNgJ7Bu/nRUaihnx5xK5KIyeXLUpc8Z +qvSs0sdjar+/liTIb/0U+kGxTgLV6RToioQS4QBwk6YOMTPx010WJ4DJSawqP2Xg +IxI5PxK5hFihnQiWTJX3xBkZwRnI4CgoMRQyA2hF8l7VCW2F6/NlwWU1logYxIja +e0JwSBJquy8OzI90avAMk7fv3oLPY4IOcsTLKg6kNeMf9d5dSDhgLmy1VOmm8jkl +zs+DRqwwl+btDyQwRTZbwH4MBevU7J78ONNwKoL1pz6sApxgFo6Vxe/C4EKSgAvt ++634aw5UokTQRiKxPYovWCr97FBPgEi+8OOtV2fcpffdueglVLLY +-----END CERTIFICATE----- diff --git a/configs/cert1/client.csr b/configs/cert1/client.csr new file mode 100644 index 0000000000000..449ac6088e76f --- /dev/null +++ b/configs/cert1/client.csr @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIDOzCCAiMCAQAwQzELMAkGA1UEBhMCQ04xDzANBgNVBAoMBm1pbHZ1czEPMA0G +A1UECwwGbWlsdnVzMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCtJXJL5oaOVpgerMvkL9UmPhQPh4YYVSUH7F0lfihy +IaIwozU7hAQelrp9zg0c8YE2wWn8+ejOnQp/XQ55DWSzi1tjwDwnVr5euDkki108 +pHmEFYEHPkn3UUGplegxAVxOZdWlgc5wVKfpu+eMCllguY+8lrF3ZYJyduPAlhjU +8gA2KUmwh7bu2hRE3s9p6PdCdKrH/EqKqoyxh/m0p/IuhHfiv3p9E2S3szJNoxYC +0xwPSilRMwCj6io6fkcM4KqApqmNuM3fYoTwrE+RhO3FqdDbM6a3d653I7e5A72G +oh0A5GU5M2CU8B+46ez1wL/03KxAGK93gCsxqsX1QJlpAgMBAAGggbIwga8GCSqG +SIb3DQEJDjGBoTCBnjCBgwYDVR0RBHwweoIQbWlsdnVzLWluZGV4bm9kZYIMbWls +dnVzLXByb3h5ghBtaWx2dXMtcm9vdGNvb3JkghFtaWx2dXMtcXVlcnljb29yZIIQ +bWlsdnVzLWRhdGFjb29yZIIPbWlsdnVzLWRhdGFub2RlghBtaWx2dXMtcXVlcnlu +b2RlMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMA0GCSqGSIb3DQEBCwUAA4IBAQBP +Q7mVMd7GArJuaNFJSphBzUURIuf6FQjeluk5kxgIRHLEu0Lxv6X/Kmugc8hdi9BI +afEL3NvuzuZ7bVt6irhVeSRuNgmx513G/HkYpi6H16LYyj9x38AOCRnqD+XJ1yjR 
+R3KwC/VTNVRLS7iv8qDdHCpNfPjtgwVKL9HS14rj/ztUw5JSWTizc/VycwOhaj+g +2EckZ8uBPS1KCGQvPfdP63D0XuWU2PRuu4WBxH2u5qK1C0AlVzyKYqLl2IODH7Gk +dkhVAjLsIRLoMfihoDYWO18rOKbfReerLoR7p4kk4UOifLyo4VU9sqFzG1vUbztr +jogy6LVH0laQAjeYM69c +-----END CERTIFICATE REQUEST----- diff --git a/configs/cert1/client.key b/configs/cert1/client.key new file mode 100644 index 0000000000000..a205787ccd285 --- /dev/null +++ b/configs/cert1/client.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCtJXJL5oaOVpge +rMvkL9UmPhQPh4YYVSUH7F0lfihyIaIwozU7hAQelrp9zg0c8YE2wWn8+ejOnQp/ +XQ55DWSzi1tjwDwnVr5euDkki108pHmEFYEHPkn3UUGplegxAVxOZdWlgc5wVKfp +u+eMCllguY+8lrF3ZYJyduPAlhjU8gA2KUmwh7bu2hRE3s9p6PdCdKrH/EqKqoyx +h/m0p/IuhHfiv3p9E2S3szJNoxYC0xwPSilRMwCj6io6fkcM4KqApqmNuM3fYoTw +rE+RhO3FqdDbM6a3d653I7e5A72Goh0A5GU5M2CU8B+46ez1wL/03KxAGK93gCsx +qsX1QJlpAgMBAAECggEAKtxNKcq73jpDV7yQkEDi85+EWb/DivxwuUBiYmiTXIVE +3+NWxw/L8UKAkanTByolQUepk+zNG3A2bAa+EWsmVWeWzN+GRHqtj0btJkW1Y9CX +IAIuuXtK1gTwSymHm7HphqKgu2Khlp7uHebcz4a0T0Q/94AaGCMxomIZyM+e//8r +iUv7QMZsTAD8ilfoxe7Sh9gSK57UJUoqJ5zf8qf7OfHrPqct4rhzNlICqoxOivdL +c5S1LOWW8uS7txRqs99WLSwDu+IaA+Q6a6DQMOVzTKWO7+jzS0nE5KeG/Y2EPPxz +2s1D3tm5CV5eOlCBXbxRp2RunPwGusUNwK9B9T7y/QKBgQDbfNYA1hBP3ISeocLx +V5JlSxBqA2/uK1sd4PoR/5GvZhezV+TE2A+jwQlOWb7Y9oFm1eG4yjjx8w8SZcXm +nDr3+znUL5CHI4+aEg20ESb5b69ywMW4vWwjcESu+GseEmLDTqUVRwhOFPkjc25J +we9Ij1tK9RLxJQCAULDrxNNvuwKBgQDJ8xqP9lLGpQ1qORNUH/JsLaphHyxXwz2U +H0vfWfFCKyyaXnlenn4AxBXDX7OON2rGnaf9Xt4Y+MlQrHaTyKbta47/sQltiXdD +DQZpW2JanuZhfRKHOp26C3TjJazfOqu/BBzGX6ANOlY1bowBqHgtOA60y2osG34o +/0oRaVSvKwKBgGInhZyCHnFYcIZwWLmNLA7YbtK6Dsg341uNTMfXvtLtmRO+7rOI +3BWjMgIz6EEmm9/dLkBxTt4EkGztzn6+xzA2zTmyu7Nzl7iS8kXglclexNtQkht3 +unBuuIh215BvMHOfK/xD89bXUkMkT+H0NIoCchUnW/0zSvIXI8eMEfIZAoGBAMGW +8Zefoaoa/skc/ZJfLZzWCcYgg/l9h8I+XhU1uzia+p7ggahadp91WQd47j8ekJTk +/Oy9zFpT42gaa3EvoBVoTiR+KXN/m3wDOGo22AnW2TILG3/WiOyXnu1AaH91Vr49 +yve/I0aK7ak616ufO9TdL/vQI280y7OxRed6+VsRAoGAcmErvfA0EQTzJr8qG3uv +s5bL7XHFvsve8AYQ9wj+4LJXRm2caA/XskFjf41EH/+pHf1sDfXKbrZIi+O1vpNr +H2yXVH0smfnrBZ8MRc6kytaROMzhsChXXIbtaDE/sV1hKRY2C8+htNdOjvGVBzoZ +uoAOr9OxOxXIB1DjFL2bphw= +-----END PRIVATE KEY----- diff --git a/configs/cert1/client.pem b/configs/cert1/client.pem new file mode 100644 index 0000000000000..6a107f2fc68e4 --- /dev/null +++ b/configs/cert1/client.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEHDCCAwSgAwIBAgIUEwau4SGcTvhF7Y0hX+/E1pyIEBUwDQYJKoZIhvcNAQEL +BQAwaTELMAkGA1UEBhMCQ04xETAPBgNVBAgMCFNoYW5naGFpMREwDwYDVQQHDAhT +aGFuZ2hhaTEPMA0GA1UECgwGbWlsdnVzMQ8wDQYDVQQLDAZtaWx2dXMxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0yNDA5MDMxNDMxMjZaFw0zNDA5MDExNDMxMjZaMEMx +CzAJBgNVBAYTAkNOMQ8wDQYDVQQKDAZtaWx2dXMxDzANBgNVBAsMBm1pbHZ1czES +MBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArSVyS+aGjlaYHqzL5C/VJj4UD4eGGFUlB+xdJX4ociGiMKM1O4QEHpa6fc4N +HPGBNsFp/Pnozp0Kf10OeQ1ks4tbY8A8J1a+Xrg5JItdPKR5hBWBBz5J91FBqZXo +MQFcTmXVpYHOcFSn6bvnjApZYLmPvJaxd2WCcnbjwJYY1PIANilJsIe27toURN7P +aej3QnSqx/xKiqqMsYf5tKfyLoR34r96fRNkt7MyTaMWAtMcD0opUTMAo+oqOn5H +DOCqgKapjbjN32KE8KxPkYTtxanQ2zOmt3eudyO3uQO9hqIdAORlOTNglPAfuOns +9cC/9NysQBivd4ArMarF9UCZaQIDAQABo4HhMIHeMIGDBgNVHREEfDB6ghBtaWx2 +dXMtaW5kZXhub2RlggxtaWx2dXMtcHJveHmCEG1pbHZ1cy1yb290Y29vcmSCEW1p +bHZ1cy1xdWVyeWNvb3JkghBtaWx2dXMtZGF0YWNvb3Jkgg9taWx2dXMtZGF0YW5v +ZGWCEG1pbHZ1cy1xdWVyeW5vZGUwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwHQYD +VR0OBBYEFPSIDDC9KH0GUBERB5OBuuRBqBKtMB8GA1UdIwQYMBaAFPYkP7RsfBOq +E1dhmcKUYLvhA+2TMA0GCSqGSIb3DQEBCwUAA4IBAQAMcjcDqfEOXMr19xFg5Lsq 
+01N7mZCGsdZpJgxqw541FZm6K8nZL8Ibi/42SR55jd6qt7wgOSKVoXWGzQAwgmA2 +SGQrWACwWUUXavdlA+OIW9pMAPhUELjdMwjES3AtYSqWngu1SyeF3zYjmRempueC +zlTMd2951nE6Z0w5IMn/ACqDtzY15+kQhA7+3ZMPVxHDt9Kd3xqROLfhFaT/sbbD +k026dwEakUsV49MZce259KpvecDYQZLFMJufe8QPV79KEF3CVD5SsLN8COVvHLk3 +kTxi4r9q+z9ErDBNAEKd1Pg95xEaDtboeBfgH5SnFg5sxFjRfm3wxg+XZ9t2GVT+ +-----END CERTIFICATE----- diff --git a/configs/cert1/gen.sh b/configs/cert1/gen.sh new file mode 100755 index 0000000000000..4c88056191181 --- /dev/null +++ b/configs/cert1/gen.sh @@ -0,0 +1,22 @@ +Country="CN" +State="Shanghai" +Location="Shanghai" +Organization="milvus" +Organizational="milvus" +CommonName="localhost" + +echo "generate ca.key" +openssl genrsa -out ca.key 2048 + +echo "generate ca.pem" +openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj "/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName" + +echo "generate server SAN certificate" +openssl genpkey -algorithm RSA -out server.key +openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj "/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName" -config ./openssl.cnf -extensions v3_req +openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req + +echo "generate client SAN certificate" +openssl genpkey -algorithm RSA -out client.key +openssl req -new -nodes -key client.key -out client.csr -days 3650 -subj "/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName" -config ./openssl.cnf -extensions v3_req +openssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req \ No newline at end of file diff --git a/configs/cert1/openssl.cnf b/configs/cert1/openssl.cnf new file mode 100644 index 0000000000000..71911a1d37e91 --- /dev/null +++ b/configs/cert1/openssl.cnf @@ -0,0 +1,213 @@ + +HOME = . +RANDFILE = $ENV::HOME/.rnd + +oid_section = new_oids + + +[ new_oids ] + + +tsa_policy1 = 1.2.3.4.1 +tsa_policy2 = 1.2.3.4.5.6 +tsa_policy3 = 1.2.3.4.5.7 + +[ ca ] +default_ca = CA_default # The default ca section + +[ CA_default ] + +dir = ./demoCA # Where everything is kept +certs = $dir/certs # Where the issued certs are kept +crl_dir = $dir/crl # Where the issued crl are kept +database = $dir/index.txt # database index file. + # several ctificates with same subject. +new_certs_dir = $dir/newcerts # default place for new certs. 
+ +certificate = $dir/cacert.pem # The CA certificate +serial = $dir/serial # The current serial number +crlnumber = $dir/crlnumber # the current crl number + # must be commented out to leave a V1 CRL +crl = $dir/crl.pem # The current CRL +private_key = $dir/private/cakey.pem# The private key +RANDFILE = $dir/private/.rand # private random number file + +x509_extensions = usr_cert # The extentions to add to the cert + +name_opt = ca_default # Subject Name options +cert_opt = ca_default # Certificate field options + +copy_extensions = copy + + +default_days = 365 # how long to certify for +default_crl_days= 30 # how long before next CRL +default_md = default # use public key default MD +preserve = no # keep passed DN ordering + +policy = policy_match + +[ policy_match ] +countryName = match +stateOrProvinceName = match +organizationName = match +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ policy_anything ] +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req ] +default_bits = 2048 +default_keyfile = privkey.pem +distinguished_name = req_distinguished_name +attributes = req_attributes +x509_extensions = v3_ca # The extentions to add to the self signed cert + + +string_mask = utf8only + +req_extensions = v3_req # The extensions to add to a certificate request + +[ req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = AU +countryName_min = 2 +countryName_max = 2 + +stateOrProvinceName = State or Province Name (full name) +stateOrProvinceName_default = Some-State + +localityName = Locality Name (eg, city) + +0.organizationName = Organization Name (eg, company) +0.organizationName_default = Internet Widgits Pty Ltd + + +organizationalUnitName = Organizational Unit Name (eg, section) + +commonName = Common Name (e.g. 
server FQDN or YOUR name) +commonName_max = 64 + +emailAddress = Email Address +emailAddress_max = 64 + + +[ req_attributes ] +challengePassword = A challenge password +challengePassword_min = 4 +challengePassword_max = 20 + +unstructuredName = An optional company name + +[ usr_cert ] + + + +basicConstraints=CA:FALSE + + + + + + + +nsComment = "OpenSSL Generated Certificate" + +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + + + + + +[ v3_req ] +subjectAltName = @alt_names +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment + +[ alt_names ] +DNS.1 = milvus-indexnode +DNS.2 = milvus-proxy +DNS.3 = milvus-rootcoord +DNS.4 = milvus-querycoord +DNS.5 = milvus-datacoord +DNS.6 = milvus-datanode +DNS.7 = milvus-querynode + +[ v3_ca ] + + + + + +subjectKeyIdentifier=hash + +authorityKeyIdentifier=keyid:always,issuer + +basicConstraints = CA:true + + + + + +[ crl_ext ] + + +authorityKeyIdentifier=keyid:always + +[ proxy_cert_ext ] + + +basicConstraints=CA:FALSE + + + + + + + +nsComment = "OpenSSL Generated Certificate" + +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + + + + +proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo + +[ tsa ] + +default_tsa = tsa_config1 # the default TSA section + +[ tsa_config1 ] + +dir = ./demoCA # TSA root directory +serial = $dir/tsaserial # The current serial number (mandatory) +crypto_device = builtin # OpenSSL engine to use for signing +signer_cert = $dir/tsacert.pem # The TSA signing certificate + # (optional) +certs = $dir/cacert.pem # Certificate chain to include in reply + # (optional) +signer_key = $dir/private/tsakey.pem # The TSA private key (optional) + +default_policy = tsa_policy1 # Policy if request did not specify it + # (optional) +other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional) +digests = md5, sha1 # Acceptable message digests (mandatory) +accuracy = secs:1, millisecs:500, microsecs:100 # (optional) +clock_precision_digits = 0 # number of digits after dot. (optional) +ordering = yes # Is ordering defined for timestamps? + # (optional, default: no) +tsa_name = yes # Must the TSA name be included in the reply? + # (optional, default: no) +ess_cert_id_chain = no # Must the ESS cert id chain be included? 
+ # (optional, default: no) diff --git a/configs/cert1/server.csr b/configs/cert1/server.csr new file mode 100644 index 0000000000000..f44ca2c1a1191 --- /dev/null +++ b/configs/cert1/server.csr @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIDOzCCAiMCAQAwQzELMAkGA1UEBhMCQ04xDzANBgNVBAoMBm1pbHZ1czEPMA0G +A1UECwwGbWlsdnVzMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDo5OwXUQ70OtO5HNh/hgLprcTD0VJLDkA7RX2gbCfj +WHxdTMu8kF86t4vh42vOjO6W3IKKFggg2jkXVWgyy52HDc4OMUM8kYjWTDlvf84A +dP0/0oBsKGVt0sBcylGPuzNjk2OU7v43ETZZ+BgBe3DXRVtjJ5t/qdTB0ynk0tmv +oOIniHJ2mslDkd1r5FmG2dWehOsCjxGhzw0thpV3l3xF9MhwR0sjivCcVJdXQ7nV +pL7RrwPopfCNxIFVH+9sXeyymkl/zfycwL9eK41fXiwKbLAoKCGKajt9rV6Vjggt +QonYlcnau7BEwE75PywBeIn0mdxIXHN3oJy12hmBo4FhAgMBAAGggbIwga8GCSqG +SIb3DQEJDjGBoTCBnjCBgwYDVR0RBHwweoIQbWlsdnVzLWluZGV4bm9kZYIMbWls +dnVzLXByb3h5ghBtaWx2dXMtcm9vdGNvb3JkghFtaWx2dXMtcXVlcnljb29yZIIQ +bWlsdnVzLWRhdGFjb29yZIIPbWlsdnVzLWRhdGFub2RlghBtaWx2dXMtcXVlcnlu +b2RlMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMA0GCSqGSIb3DQEBCwUAA4IBAQDc ++pZ5nKONOQK6ovscirQ92bfTNwXR79k0RLvzQwhuq/O3d+9yma6N1sV2qZMnhDoD +C68zMuvu8Ok5CBY6tN4qjz5eCqtiELgCA1qJhuJUqpPKS04/7JfMUdiAQSsIDxqP +GiZvFGx1Vinwj0eMJ6lTGKwpyj1J7WvVCnMZ1T8InIEcaZwaCAjkk8BcidwEJ+Nv +OAwvs9/EoMbhs+jOY1tbpz0sxuWPnX8cTZ6rTWAA8/2qZYs9XPjP2UqCTv+j8BY1 +if9/25DynFTG/drmswbM5GHAkPfeQ9fPqCf6o7qCAJMCYrgpOov9Bw+0D40oQTej +jtT/R+EydvNtdEuIerVQ +-----END CERTIFICATE REQUEST----- diff --git a/configs/cert1/server.key b/configs/cert1/server.key new file mode 100644 index 0000000000000..a200ffe00e145 --- /dev/null +++ b/configs/cert1/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDo5OwXUQ70OtO5 +HNh/hgLprcTD0VJLDkA7RX2gbCfjWHxdTMu8kF86t4vh42vOjO6W3IKKFggg2jkX +VWgyy52HDc4OMUM8kYjWTDlvf84AdP0/0oBsKGVt0sBcylGPuzNjk2OU7v43ETZZ ++BgBe3DXRVtjJ5t/qdTB0ynk0tmvoOIniHJ2mslDkd1r5FmG2dWehOsCjxGhzw0t +hpV3l3xF9MhwR0sjivCcVJdXQ7nVpL7RrwPopfCNxIFVH+9sXeyymkl/zfycwL9e +K41fXiwKbLAoKCGKajt9rV6VjggtQonYlcnau7BEwE75PywBeIn0mdxIXHN3oJy1 +2hmBo4FhAgMBAAECggEAAy7DeaQY/vDhwNPN7A3+gPlZrKOcuTu/AxPnT+BsSDxc +KdZ3jaKF5Gmx1PV9ZEgN/RcE6lZJhLSGvm2cT6XXB6f0I98tseF7C/are9KmWuB3 +S/dfhndOoxiFmlfpPfxSNdngLOXr4Y5v8DXRIUcqof59CfNaHX2Rfl6ZU7AnFDIM +z8grddf5NRFCWnCkfQyl1uZX7B9xGnUwZ24oda6SEalvDBYAODMp+iVVCv/qxN7v +YQSBNCGdyK4cKSNp7LKggzOEPA7QuRCjhXg1ZU5IBcEjwJbkn+19YemR6dQ9OKhQ +K+EJWxOq9K/SD3ernHecVSxMBfE9dwEtRQ0HvuyBYQKBgQD0TMqmByWFRiK/doiK +GbLuiHfk+btGIXTQ2VSyq0melKv0Y2qfqANDrYJv8anV+9Z562vqO+ruq2dYzhtz +4fsv1LovNjpDzJElayrQNMTJeexmMhii9AaMkHXmsX3zFhCzcgkJm9RykJcNePkl +U4O50DI5iu+RxYsT2l58IAxtuQKBgQD0DErCQKWrOr11mzGXE8XMC4CBZjVP89/n +k1DNxWtk+HUa8soRCms/XkXhXe06+yCA2ADhoJ4xlQb95fVEfBxchlZIjlrChK9M +fOLL0celBwvCnWCYcYwKcFGj/ImtI3XB1CxC01eIz0TAsjqydfa+e2fRW8Sc5b+k +IzCwHeLE6QKBgQDT/B6MWv8v2mAki5KHYy1k1n5S7nCWtklLQIrCTuee95sOgYXv +7DaI73KT+LtKtzCwspv9sOh3Gbyfzpj7wUfZhVnZxdBhLI12Trvj1yY7PdToCOhf +3iggqOnluHZ2DNuJGpbFA7chgl3QjvyCaymOfx5V2D1x3FyGaaI7SwP94QKBgQC4 +TKAYWmHCEbYKHo0wnkn9caI2Fk2kd3NbmrCm9LPJqKgnFbrra4bacAMt0pTYudUt +9/GSco2EvBpvmz3B3kg8x2ZZnMeFplYL2TQc3+t5AP0sl1TfSsCLEY9V29zM176S +Bfz26ktskbpGG1Kget0/tEoOnNK5+fbjqX+XQLqEIQKBgQC+6FU/ekv/2ps8r9Vi +61HUSp5+u6iAyogoEo5ntYFmYY3oK5xSKgAHNb7VZ96VBHFtMVMnHazbqR38IXWZ +MSbsliQArQiRbdWdO+E2vnYUibVQgz5M5gvOCsJ0cVBGN7BTew6Vgldz88HEw2aH +epDb2XvU2t1lrwpiXmrVlXYUIw== +-----END PRIVATE KEY----- diff --git a/configs/cert1/server.pem b/configs/cert1/server.pem new file mode 100644 index 0000000000000..a8fde486e8641 --- /dev/null +++ b/configs/cert1/server.pem @@ -0,0 +1,24 @@ +-----BEGIN 
CERTIFICATE----- +MIIEHDCCAwSgAwIBAgIUDsM5d8ecqos/EGwgRFuPrkRZNmgwDQYJKoZIhvcNAQEL +BQAwaTELMAkGA1UEBhMCQ04xETAPBgNVBAgMCFNoYW5naGFpMREwDwYDVQQHDAhT +aGFuZ2hhaTEPMA0GA1UECgwGbWlsdnVzMQ8wDQYDVQQLDAZtaWx2dXMxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0yNDA5MDMxNDMxMjZaFw0zNDA5MDExNDMxMjZaMEMx +CzAJBgNVBAYTAkNOMQ8wDQYDVQQKDAZtaWx2dXMxDzANBgNVBAsMBm1pbHZ1czES +MBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA6OTsF1EO9DrTuRzYf4YC6a3Ew9FSSw5AO0V9oGwn41h8XUzLvJBfOreL4eNr +zozultyCihYIINo5F1VoMsudhw3ODjFDPJGI1kw5b3/OAHT9P9KAbChlbdLAXMpR +j7szY5NjlO7+NxE2WfgYAXtw10VbYyebf6nUwdMp5NLZr6DiJ4hydprJQ5Hda+RZ +htnVnoTrAo8Roc8NLYaVd5d8RfTIcEdLI4rwnFSXV0O51aS+0a8D6KXwjcSBVR/v +bF3ssppJf838nMC/XiuNX14sCmywKCghimo7fa1elY4ILUKJ2JXJ2ruwRMBO+T8s +AXiJ9JncSFxzd6CctdoZgaOBYQIDAQABo4HhMIHeMIGDBgNVHREEfDB6ghBtaWx2 +dXMtaW5kZXhub2RlggxtaWx2dXMtcHJveHmCEG1pbHZ1cy1yb290Y29vcmSCEW1p +bHZ1cy1xdWVyeWNvb3JkghBtaWx2dXMtZGF0YWNvb3Jkgg9taWx2dXMtZGF0YW5v +ZGWCEG1pbHZ1cy1xdWVyeW5vZGUwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwHQYD +VR0OBBYEFFVvAu0gUyozn6ROhJ+HwhaewkzEMB8GA1UdIwQYMBaAFPYkP7RsfBOq +E1dhmcKUYLvhA+2TMA0GCSqGSIb3DQEBCwUAA4IBAQCq0KaHRbWeg5OhOPVFnwdq +PypIDbd0xXGq9W/vThRp+V7CyEf/zsiDZk4Aa4DgSl3WPcEiJesDS0GTxbjZXlKz +5hFVeie95izWhzAZ7hNp7GEYNzoE5W8/VP35EF15kQJUSdycDDK6oWSl6kmC9rf/ +jsO4dnbqUm9BDRtTOfT3TrgM6wkwDWuUtfPiIUxJJLv+bN6rT3nAMq0DWSjqX9PS +ewsVA87b7GHd3uEgFl/j+bTV4uwXP4A9Xp5N4bgweQKm3sBQI6eSn5S0KO6XaExa +jv2o8xZto6z/ppRp27ma5f9Y1PCCVS1MTu/YtyE8BxObDcXUz24LiE25jRMGUir1 +-----END CERTIFICATE----- diff --git a/configs/docker-compose.yml b/configs/docker-compose.yml new file mode 100644 index 0000000000000..e0748d1889eaa --- /dev/null +++ b/configs/docker-compose.yml @@ -0,0 +1,175 @@ +version: '3.5' + +services: + etcd: + image: quay.io/coreos/etcd:v3.5.5 + environment: + - ETCD_AUTO_COMPACTION_MODE=revision + - ETCD_AUTO_COMPACTION_RETENTION=1000 + - ETCD_QUOTA_BACKEND_BYTES=4294967296 + - ETCD_SNAPSHOT_COUNT=50000 + volumes: + - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd + command: etcd -listen-peer-urls=http://127.0.0.1:2380 -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 -initial-advertise-peer-urls=http://127.0.0.1:2380 --initial-cluster default=http://127.0.0.1:2380 --data-dir /etcd + ports: + - "2379:2379" + - "2380:2380" + - "4001:4001" + + pulsar: + image: apachepulsar/pulsar:2.8.2 + volumes: + - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/pulsar:/pulsar/data + environment: + # bin/apply-config-from-env.py script will modify the configuration file based on the environment variables + # nettyMaxFrameSizeBytes must be calculated from maxMessageSize + 10240 (padding) + - nettyMaxFrameSizeBytes=104867840 # this is 104857600 + 10240 (padding) + - defaultRetentionTimeInMinutes=10080 + - defaultRetentionSizeInMB=8192 + # maxMessageSize is missing from standalone.conf, must use PULSAR_PREFIX_ to get it configured + - PULSAR_PREFIX_maxMessageSize=104857600 + - PULSAR_GC=-XX:+UseG1GC + command: | + /bin/bash -c \ + "bin/apply-config-from-env.py conf/standalone.conf && \ + exec bin/pulsar standalone --no-functions-worker --no-stream-storage" + ports: + - "6650:6650" + - "18080:8080" + + minio: + image: minio/minio:RELEASE.2023-03-20T20-16-18Z + ports: + - "9000:9000" + - "9001:9001" + environment: + MINIO_ACCESS_KEY: minioadmin + MINIO_SECRET_KEY: minioadmin + volumes: + - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data + command: minio server /minio_data --console-address ":9001" + healthcheck: + test: + [ + "CMD", + "curl", + "-f", + 
"http://localhost:9000/minio/health/live" + ] + interval: 30s + timeout: 20s + retries: 3 + + azurite: + image: mcr.microsoft.com/azure-storage/azurite + volumes: + - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/azurite:/data + command: azurite-blob --blobHost 0.0.0.0 + ports: + - "10000:10000" + + jaeger: + image: jaegertracing/all-in-one:latest + ports: + - "6831:6831/udp" + - "4317:4317" # OLTP over gRPC + - "4318:4318" # OLTP over HTTP + - "16686:16686" # frontent + - "14268:14268" # jaeger.thirft + + zookeeper: + image: wurstmeister/zookeeper:latest + ports: + - "2181:2181" + + kafka: + image: 'bitnami/kafka:3.1.0' + ports: + - '9092:9092' + environment: + - KAFKA_BROKER_ID=0 + - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092 + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 + - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - ALLOW_PLAINTEXT_LISTENER=yes + # set kafka server config + - KAFKA_CFG_MAX_PARTITION_FETCH_BYTES=5242880 + - KAFKA_CFG_MAX_REQUEST_SIZE=5242880 + - KAFKA_CFG_MESSAGE_MAX_BYTES=5242880 + - KAFKA_CFG_REPLICA_FETCH_MAX_BYTES=5242880 + - KAFKA_CFG_FETCH_MESSAGE_MAX_BYTES=5242880 + - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true + depends_on: + - zookeeper + + milvus-proxy: + image: ghostbaby/milvus:master_2 + command: sh -c "envsubst < /milvus/configs/milvus_internaltls.yaml > /milvus/configs/milvus.yaml && milvus run proxy" + hostname: milvus-proxy + environment: + - MINIO_ADDRESS=minio:9000 + - ETCD_ENDPOINTS=etcd:2379 + - PULSAR_ADDRESS=pulsar://pulsar:6650 + ports: + - "19530:19530" + depends_on: + - milvus-rootcoord + - milvus-datacoord + + milvus-rootcoord: + image: ghostbaby/milvus:master_2 + command: sh -c "envsubst < /milvus/configs/milvus_internaltls.yaml > /milvus/configs/milvus.yaml && milvus run rootcoord" + hostname: milvus-rootcoord + environment: + - MINIO_ADDRESS=minio:9000 + - ETCD_ENDPOINTS=etcd:2379 + - PULSAR_ADDRESS=pulsar://pulsar:6650 + + milvus-datacoord: + image: ghostbaby/milvus:master_2 + command: sh -c "envsubst < /milvus/configs/milvus_internaltls.yaml > /milvus/configs/milvus.yaml && milvus run datacoord" + hostname: milvus-datacoord + environment: + - MINIO_ADDRESS=minio:9000 + - ETCD_ENDPOINTS=etcd:2379 + - PULSAR_ADDRESS=pulsar://pulsar:6650 + + milvus-querycoord: + image: ghostbaby/milvus:master_2 + command: sh -c "envsubst < /milvus/configs/milvus_internaltls.yaml > /milvus/configs/milvus.yaml && milvus run querycoord" + hostname: milvus-querycoord + environment: + - MINIO_ADDRESS=minio:9000 + - ETCD_ENDPOINTS=etcd:2379 + - PULSAR_ADDRESS=pulsar://pulsar:6650 + + milvus-querynode: + image: ghostbaby/milvus:master_2 + command: sh -c "envsubst < /milvus/configs/milvus_internaltls.yaml > /milvus/configs/milvus.yaml && milvus run querynode" + hostname: milvus-querynode + environment: + - MINIO_ADDRESS=minio:9000 + - ETCD_ENDPOINTS=etcd:2379 + - PULSAR_ADDRESS=pulsar://pulsar:6650 + + milvus-datanode: + image: ghostbaby/milvus:master_2 + command: sh -c "envsubst < /milvus/configs/milvus_internaltls.yaml > /milvus/configs/milvus.yaml && milvus run datanode" + hostname: milvus-datanode + environment: + - MINIO_ADDRESS=minio:9000 + - ETCD_ENDPOINTS=etcd:2379 + - PULSAR_ADDRESS=pulsar://pulsar:6650 + + milvus-indexnode: + image: ghostbaby/milvus:master_2 + command: sh -c "envsubst < /milvus/configs/milvus_internaltls.yaml > /milvus/configs/milvus.yaml && milvus run indexnode" + hostname: milvus-indexnode + environment: + - MINIO_ADDRESS=minio:9000 + - ETCD_ENDPOINTS=etcd:2379 + - PULSAR_ADDRESS=pulsar://pulsar:6650 + +networks: + default: + name: 
milvus_dev \ No newline at end of file diff --git a/configs/milvus.yaml b/configs/milvus.yaml index 55939c50098da..a8d3f099c988c 100644 --- a/configs/milvus.yaml +++ b/configs/milvus.yaml @@ -133,17 +133,11 @@ minio: # aliyun (ecs): https://www.alibabacloud.com/help/en/elastic-compute-service/latest/attach-an-instance-ram-role useIAM: false # Cloud Provider of S3. Supports: "aws", "gcp", "aliyun". - # Cloud Provider of Google Cloud Storage. Supports: "gcpnative". # You can use "aws" for other cloud provider supports S3 API with signature v4, e.g.: minio # You can use "gcp" for other cloud provider supports S3 API with signature v2 # You can use "aliyun" for other cloud provider uses virtual host style bucket - # You can use "gcpnative" for the Google Cloud Platform provider. Uses service account credentials - # for authentication. # When useIAM enabled, only "aws", "gcp", "aliyun" is supported for now cloudProvider: aws - # The JSON content contains the gcs service account credentials. - # Used only for the "gcpnative" cloud provider. - gcpCredentialJSON: # Custom endpoint for fetch IAM role credentials. when useIAM is true & cloudProvider is "aws". # Leave it empty if you want to use AWS default endpoint iamEndpoint: @@ -167,7 +161,6 @@ mq: enablePursuitMode: true # Default value: "true" pursuitLag: 10 # time tick lag threshold to enter pursuit mode, in seconds pursuitBufferSize: 8388608 # pursuit mode buffer size in bytes - pursuitBufferTime: 60 # pursuit mode buffer time in seconds mqBufSize: 16 # MQ client consumer buffer length dispatcher: mergeCheckInterval: 1 # the interval time(in seconds) for dispatcher to check whether to merge @@ -185,9 +178,9 @@ pulsar: port: 6650 # Port of Pulsar service. webport: 80 # Web port of of Pulsar service. If you connect direcly without proxy, should use 8080. # The maximum size of each message in Pulsar. Unit: Byte. - # By default, Pulsar can transmit at most 2MB of data in a single message. When the size of inserted data is greater than this value, proxy fragments the data into multiple messages to ensure that they can be transmitted correctly. + # By default, Pulsar can transmit at most 5 MB of data in a single message. When the size of inserted data is greater than this value, proxy fragments the data into multiple messages to ensure that they can be transmitted correctly. # If the corresponding parameter in Pulsar remains unchanged, increasing this configuration will cause Milvus to fail, and reducing it produces no advantage. - maxMessageSize: 2097152 + maxMessageSize: 5242880 # Pulsar can be provisioned for specific tenants with appropriate capacity allocated to the tenant. # To share a Pulsar instance among multiple Milvus instances, you can change this to an Pulsar tenant rather than the default one for each Milvus instance before you start them. However, if you do not want Pulsar multi-tenancy, you are advised to change msgChannel.chanNamePrefix.cluster to the different value. 
tenant: public @@ -400,7 +393,6 @@ queryNode: nprobe: 16 # nprobe to search small index, based on your accuracy requirement, must smaller than nlist memExpansionRate: 1.15 # extra memory needed by building interim index buildParallelRate: 0.5 # the ratio of building interim index parallel matched with cpu num - multipleChunkedEnable: false # Enable multiple chunked search knowhereScoreConsistency: false # Enable knowhere strong consistency score computation logic loadMemoryUsageFactor: 1 # The multiply factor of calculating the memory usage while loading segments enableDisk: false # enable querynode load disk index, and search on disk index @@ -420,11 +412,7 @@ queryNode: vectorIndex: false # Enable mmap for loading vector index scalarField: false # Enable mmap for loading scalar data scalarIndex: false # Enable mmap for loading scalar index - chunkCache: true # Enable mmap for chunk cache (raw vector retrieving). - # Enable memory mapping (mmap) to optimize the handling of growing raw data. - # By activating this feature, the memory overhead associated with newly added or modified data will be significantly minimized. - # However, this optimization may come at the cost of a slight decrease in query latency for the affected data segments. - growingMmapEnabled: false + growingMmapEnabled: false # Enable mmap for using in growing raw data fixedFileSizeForMmapAlloc: 1 # tmp file size for mmap chunk manager maxDiskUsagePercentageForMmapAlloc: 50 # disk percentage used in mmap chunk manager lazyload: @@ -434,7 +422,6 @@ queryNode: requestResourceRetryInterval: 2000 # retry interval in milliseconds for waiting request resource for lazy load, 2s by default maxRetryTimes: 1 # max retry times for lazy load, 1 by default maxEvictPerRetry: 1 # max evict count for lazy load, 1 by default - indexOffsetCacheEnabled: false # enable index offset cache for some scalar indexes, now is just for bitmap index, enable this param can improve performance for retrieving raw data from index grouping: enabled: true maxNQ: 1000 @@ -462,15 +449,12 @@ queryNode: taskQueueExpire: 60 # Control how long (many seconds) that queue retains since queue is empty enableCrossUserGrouping: false # Enable Cross user grouping when using user-task-polling policy. (Disable it if user's task can not merge each other) maxPendingTaskPerUser: 1024 # Max pending task per user in scheduler - levelZeroForwardPolicy: FilterByBF # delegator level zero deletion forward policy, possible option["FilterByBF", "RemoteLoad"] - streamingDeltaForwardPolicy: FilterByBF # delegator streaming deletion forward policy, possible option["FilterByBF", "Direct"] dataSync: flowGraph: maxQueueLength: 16 # The maximum size of task queue cache in flow graph in query node. maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph enableSegmentPrune: false # use partition stats to prune data in search/query on shard delegator - queryStreamBatchSize: 4194304 # return min batch size of stream query - queryStreamMaxBatchSize: 134217728 # return max batch size of stream query + queryStreamBatchSize: 4194304 # return batch size of stream query bloomFilterApplyParallelFactor: 4 # parallel factor when to apply pk to bloom filter, default to 4*CPU_CORE_NUM workerPooling: size: 10 # the size for worker querynode client pool @@ -551,7 +535,6 @@ dataCoord: # This configuration takes effect only when dataCoord.enableCompaction is set as true. 
enableAutoCompaction: true indexBasedCompaction: true - taskPrioritizer: default # compaction task prioritizer, options: [default, level]. Default is FIFO, level is prioritized by level: L0 compactions first, then mix compactions, then major compactions. rpcTimeout: 10 maxParallelTaskNum: 10 workerMaxParallelTaskNum: 2 @@ -652,9 +635,8 @@ dataNode: maxConcurrentTaskNum: 16 # The maximum number of import/pre-import tasks allowed to run concurrently on a datanode. maxImportFileSizeInGB: 16 # The maximum file size (in GB) for an import file, where an import file refers to either a Row-Based file or a set of Column-Based files. readBufferSizeInMB: 16 # The data block size (in MB) read from chunk manager by the datanode during import. - maxTaskSlotNum: 16 # The maximum number of slots occupied by each import/pre-import task. compaction: - levelZeroBatchMemoryRatio: 0.5 # The minimal memory ratio of free memory for level zero compaction executing in batch mode + levelZeroBatchMemoryRatio: 0.05 # The minimal memory ratio of free memory for level zero compaction executing in batch mode levelZeroMaxBatchSize: -1 # Max batch size refers to the max number of L1/L2 segments in a batch when executing L0 compaction. Default to -1, any value that is less than 1 means no limit. Valid range: >= 1. gracefulStopTimeout: 1800 # seconds. force stop node without graceful stop slot: @@ -761,6 +743,12 @@ tls: serverKeyPath: configs/cert/server.key caPemPath: configs/cert/ca.pem +internaltls: + serverPemPath: configs/cert1/server.pem + serverKeyPath: configs/cert1/server.key + caPemPath: configs/cert1/ca.pem + internalAdd: #pod internal address + common: defaultPartitionName: _default # Name of the default partition when a collection is created defaultIndexName: _default_idx # Name of the index when it is created with name unspecified @@ -781,6 +769,7 @@ common: BeamWidthRatio: 4 gracefulTime: 5000 # milliseconds. it represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency. gracefulStopTimeout: 1800 # seconds. it will force quit the server if the graceful stop process is not completed during this time. + bitmapIndexCardinalityBound: 500 storageType: remote # please adjust in embedded Milvus: local, available values are [local, remote, opendal], value minio is deprecated, use remote instead # Default value: auto # Valid values: [auto, avx512, avx2, avx, sse4_2] @@ -793,6 +782,7 @@ common: superUsers: defaultRootPassword: Milvus # default password for root user tlsMode: 0 + internaltlsEnabled : false session: ttl: 30 # ttl value when session granting a lease to register service retryTimes: 30 # retry times when session sending etcd requests @@ -843,7 +833,6 @@ quotaAndLimits: maxCollectionNumPerDB: 65536 # Maximum number of collections per database. maxInsertSize: -1 # maximum size of a single insert request, in bytes, -1 means no limit maxResourceGroupNumOfQueryNode: 1024 # maximum number of resource groups of query nodes - maxGroupSize: 10 # maximum size for one single group when doing search group by ddl: enabled: false # Whether DDL request throttling is enabled. # Maximum number of collection-related DDL requests per second. 
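Note on the internaltls block added to configs/milvus.yaml above: it mirrors the existing external tls section — each component presents server.pem/server.key on its internal gRPC listener and uses the shared ca.pem to verify peers, gated by common.internaltlsEnabled. The following is only an illustrative sketch of how such credentials could be built from those settings; newInternalTLSCreds is a hypothetical helper name, the mutual-TLS ClientAuth choice is an assumption, and this is not the code from internal/distributed/utils/util.go in this patch.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// newInternalTLSCreds builds gRPC server credentials from the internaltls
// settings: the component's certificate/key pair plus the shared CA used
// to verify peer components.
func newInternalTLSCreds(certPath, keyPath, caPath string) (credentials.TransportCredentials, error) {
	cert, err := tls.LoadX509KeyPair(certPath, keyPath)
	if err != nil {
		return nil, fmt.Errorf("load server key pair: %w", err)
	}
	caPem, err := os.ReadFile(caPath)
	if err != nil {
		return nil, fmt.Errorf("read CA pem: %w", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPem) {
		return nil, fmt.Errorf("append CA pem: no valid certificates")
	}
	return credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		ClientCAs:    pool,
		ClientAuth:   tls.RequireAndVerifyClientCert, // assumption: internal TLS is mutual
		MinVersion:   tls.VersionTLS12,
	}), nil
}

func main() {
	creds, err := newInternalTLSCreds(
		"configs/cert1/server.pem",
		"configs/cert1/server.key",
		"configs/cert1/ca.pem",
	)
	if err != nil {
		panic(err)
	}
	_ = grpc.NewServer(grpc.Creds(creds)) // register services and call Serve() as usual
}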
@@ -995,20 +984,37 @@ quotaAndLimits: diskQuotaPerPartition: -1 # MB, (0, +inf), default no limit l0SegmentsRowCountProtection: enabled: false # switch to enable l0 segment row count quota - lowWaterLevel: 30000000 # l0 segment row count quota, low water level - highWaterLevel: 50000000 # l0 segment row count quota, high water level - deleteBufferRowCountProtection: - enabled: false # switch to enable delete buffer row count quota - lowWaterLevel: 32768 # delete buffer row count quota, low water level - highWaterLevel: 65536 # delete buffer row count quota, high water level - deleteBufferSizeProtection: - enabled: false # switch to enable delete buffer size quota - lowWaterLevel: 134217728 # delete buffer size quota, low water level - highWaterLevel: 268435456 # delete buffer size quota, high water level + lowWaterLevel: 32768 # l0 segment row count quota, low water level + highWaterLevel: 65536 # l0 segment row count quota, low water level limitReading: # forceDeny false means dql requests are allowed (except for some # specific conditions, such as collection has been dropped), true means always reject all dql requests. forceDeny: false + queueProtection: + enabled: false + # nqInQueueThreshold indicated that the system was under backpressure for Search/Query path. + # If NQ in any QueryNode's queue is greater than nqInQueueThreshold, search&query rates would gradually cool off + # until the NQ in queue no longer exceeds nqInQueueThreshold. We think of the NQ of query request as 1. + # int, default no limit + nqInQueueThreshold: -1 + # queueLatencyThreshold indicated that the system was under backpressure for Search/Query path. + # If dql latency of queuing is greater than queueLatencyThreshold, search&query rates would gradually cool off + # until the latency of queuing no longer exceeds queueLatencyThreshold. + # The latency here refers to the averaged latency over a period of time. + # milliseconds, default no limit + queueLatencyThreshold: -1 + resultProtection: + enabled: false + # maxReadResultRate indicated that the system was under backpressure for Search/Query path. + # If dql result rate is greater than maxReadResultRate, search&query rates would gradually cool off + # until the read result rate no longer exceeds maxReadResultRate. + # MB/s, default no limit + maxReadResultRate: -1 + maxReadResultRatePerDB: -1 + maxReadResultRatePerCollection: -1 + # colOffSpeed is the speed of search&query rates cool off. + # (0, 1] + coolOffSpeed: 0.9 trace: # trace exporter type, default is stdout, @@ -1044,17 +1050,4 @@ streamingNode: serverMaxSendSize: 268435456 # The maximum size of each RPC request that the streamingNode can send, unit: byte serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the streamingNode can receive, unit: byte clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on streamingNode can send, unit: byte - clientMaxRecvSize: 268435456 # The maximum size of each RPC request that the clients on streamingNode can receive, unit: byte - -# Any configuration related to the streaming service. -streaming: - walBalancer: - # The interval of balance task trigger at background, 1 min by default. - # It's ok to set it into duration string, such as 30s or 1m30s, see time.ParseDuration - triggerInterval: 1m - # The initial interval of balance task trigger backoff, 50 ms by default. 
- # It's ok to set it into duration string, such as 30s or 1m30s, see time.ParseDuration - backoffInitialInterval: 50ms - backoffMultiplier: 2 # The multiplier of balance task trigger backoff, 2 by default - txn: - defaultKeepaliveTimeout: 10s # The default keepalive timeout for wal txn, 10s by default + clientMaxRecvSize: 268435456 # The maximum size of each RPC request that the clients on streamingNode can receive, unit: byte \ No newline at end of file diff --git a/configs/milvus_internaltls.yaml b/configs/milvus_internaltls.yaml new file mode 100644 index 0000000000000..de38db6327c2d --- /dev/null +++ b/configs/milvus_internaltls.yaml @@ -0,0 +1,1053 @@ +# Licensed to the LF AI & Data foundation under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Related configuration of etcd, used to store Milvus metadata & service discovery. +etcd: + # Endpoints used to access etcd service. You can change this parameter as the endpoints of your own etcd cluster. + # Environment variable: ETCD_ENDPOINTS + # etcd preferentially acquires valid address from environment variable ETCD_ENDPOINTS when Milvus is started. + endpoints: localhost:2379 + # Root prefix of the key to where Milvus stores data in etcd. + # It is recommended to change this parameter before starting Milvus for the first time. + # To share an etcd instance among multiple Milvus instances, consider changing this to a different value for each Milvus instance before you start them. + # Set an easy-to-identify root path for Milvus if etcd service already exists. + # Changing this for an already running Milvus instance may result in failures to read legacy data. + rootPath: by-dev + # Sub-prefix of the key to where Milvus stores metadata-related information in etcd. + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + metaSubPath: meta + # Sub-prefix of the key to where Milvus stores timestamps in etcd. + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended not to change this parameter if there is no specific reason. + kvSubPath: kv + log: + level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'. + # path is one of: + # - "default" as os.Stderr, + # - "stderr" as os.Stderr, + # - "stdout" as os.Stdout, + # - file path to append server logs to. 
+ # please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log + path: stdout + ssl: + enabled: false # Whether to support ETCD secure connection mode + tlsCert: /path/to/etcd-client.pem # path to your cert file + tlsKey: /path/to/etcd-client-key.pem # path to your key file + tlsCACert: /path/to/ca.pem # path to your CACert file + # TLS min version + # Optional values: 1.0, 1.1, 1.2, 1.3。 + # We recommend using version 1.2 and above. + tlsMinVersion: 1.3 + requestTimeout: 10000 # Etcd operation timeout in milliseconds + use: + embed: false # Whether to enable embedded Etcd (an in-process EtcdServer). + data: + dir: default.etcd # Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/ + auth: + enabled: false # Whether to enable authentication + userName: # username for etcd authentication + password: # password for etcd authentication + +metastore: + type: etcd # Default value: etcd, Valid values: [etcd, tikv] + +# Related configuration of tikv, used to store Milvus metadata. +# Notice that when TiKV is enabled for metastore, you still need to have etcd for service discovery. +# TiKV is a good option when the metadata size requires better horizontal scalability. +tikv: + endpoints: 127.0.0.1:2389 # Note that the default pd port of tikv is 2379, which conflicts with etcd. + rootPath: by-dev # The root path where data is stored in tikv + metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath + kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath + requestTimeout: 10000 # ms, tikv request timeout + snapshotScanSize: 256 # batch size of tikv snapshot scan + ssl: + enabled: false # Whether to support TiKV secure connection mode + tlsCert: # path to your cert file + tlsKey: # path to your key file + tlsCACert: # path to your CACert file + +localStorage: + # Local path to where vector data are stored during a search or a query to avoid repetitve access to MinIO or S3 service. + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + path: /var/lib/milvus/data/ + +# Related configuration of MinIO/S3/GCS or any other service supports S3 API, which is responsible for data persistence for Milvus. +# We refer to the storage service as MinIO/S3 in the following description for simplicity. +minio: + # IP address of MinIO or S3 service. + # Environment variable: MINIO_ADDRESS + # minio.address and minio.port together generate the valid access to MinIO or S3 service. + # MinIO preferentially acquires the valid IP address from the environment variable MINIO_ADDRESS when Milvus is started. + # Default value applies when MinIO or S3 is running on the same network with Milvus. + address: localhost + port: 9000 # Port of MinIO or S3 service. + # Access key ID that MinIO or S3 issues to user for authorized access. + # Environment variable: MINIO_ACCESS_KEY_ID or minio.accessKeyID + # minio.accessKeyID and minio.secretAccessKey together are used for identity authentication to access the MinIO or S3 service. + # This configuration must be set identical to the environment variable MINIO_ACCESS_KEY_ID, which is necessary for starting MinIO or S3. + # The default value applies to MinIO or S3 service that started with the default docker-compose.yml file. + accessKeyID: minioadmin + # Secret key used to encrypt the signature string and verify the signature string on server. 
It must be kept strictly confidential and accessible only to the MinIO or S3 server and users. + # Environment variable: MINIO_SECRET_ACCESS_KEY or minio.secretAccessKey + # minio.accessKeyID and minio.secretAccessKey together are used for identity authentication to access the MinIO or S3 service. + # This configuration must be set identical to the environment variable MINIO_SECRET_ACCESS_KEY, which is necessary for starting MinIO or S3. + # The default value applies to MinIO or S3 service that started with the default docker-compose.yml file. + secretAccessKey: minioadmin + useSSL: false # Switch value to control if to access the MinIO or S3 service through SSL. + ssl: + tlsCACert: /path/to/public.crt # path to your CACert file + # Name of the bucket where Milvus stores data in MinIO or S3. + # Milvus 2.0.0 does not support storing data in multiple buckets. + # Bucket with this name will be created if it does not exist. If the bucket already exists and is accessible, it will be used directly. Otherwise, there will be an error. + # To share an MinIO instance among multiple Milvus instances, consider changing this to a different value for each Milvus instance before you start them. For details, see Operation FAQs. + # The data will be stored in the local Docker if Docker is used to start the MinIO service locally. Ensure that there is sufficient storage space. + # A bucket name is globally unique in one MinIO or S3 instance. + bucketName: a-bucket + # Root prefix of the key to where Milvus stores data in MinIO or S3. + # It is recommended to change this parameter before starting Milvus for the first time. + # To share an MinIO instance among multiple Milvus instances, consider changing this to a different value for each Milvus instance before you start them. For details, see Operation FAQs. + # Set an easy-to-identify root key prefix for Milvus if etcd service already exists. + # Changing this for an already running Milvus instance may result in failures to read legacy data. + rootPath: files + # Whether to useIAM role to access S3/GCS instead of access/secret keys + # For more information, refer to + # aws: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html + # gcp: https://cloud.google.com/storage/docs/access-control/iam + # aliyun (ack): https://www.alibabacloud.com/help/en/container-service-for-kubernetes/latest/use-rrsa-to-enforce-access-control + # aliyun (ecs): https://www.alibabacloud.com/help/en/elastic-compute-service/latest/attach-an-instance-ram-role + useIAM: false + # Cloud Provider of S3. Supports: "aws", "gcp", "aliyun". + # You can use "aws" for other cloud provider supports S3 API with signature v4, e.g.: minio + # You can use "gcp" for other cloud provider supports S3 API with signature v2 + # You can use "aliyun" for other cloud provider uses virtual host style bucket + # When useIAM enabled, only "aws", "gcp", "aliyun" is supported for now + cloudProvider: aws + # Custom endpoint for fetch IAM role credentials. when useIAM is true & cloudProvider is "aws". + # Leave it empty if you want to use AWS default endpoint + iamEndpoint: + logLevel: fatal # Log level for aws sdk log. 
Supported level: off, fatal, error, warn, info, debug, trace + region: # Specify minio storage system location region + useVirtualHost: false # Whether use virtual host mode for bucket + requestTimeoutMs: 10000 # minio timeout for request time in milliseconds + # The maximum number of objects requested per batch in minio ListObjects rpc, + # 0 means using oss client by default, decrease these configration if ListObjects timeout + listObjectsMaxKeys: 0 + +# Milvus supports four MQ: rocksmq(based on RockDB), natsmq(embedded nats-server), Pulsar and Kafka. +# You can change your mq by setting mq.type field. +# If you don't set mq.type field as default, there is a note about enabling priority if we config multiple mq in this file. +# 1. standalone(local) mode: rocksmq(default) > natsmq > Pulsar > Kafka +# 2. cluster mode: Pulsar(default) > Kafka (rocksmq and natsmq is unsupported in cluster mode) +mq: + # Default value: "default" + # Valid values: [default, pulsar, kafka, rocksmq, natsmq] + type: default + enablePursuitMode: true # Default value: "true" + pursuitLag: 10 # time tick lag threshold to enter pursuit mode, in seconds + pursuitBufferSize: 8388608 # pursuit mode buffer size in bytes + mqBufSize: 16 # MQ client consumer buffer length + dispatcher: + mergeCheckInterval: 1 # the interval time(in seconds) for dispatcher to check whether to merge + targetBufSize: 16 # the lenth of channel buffer for targe + maxTolerantLag: 3 # Default value: "3", the timeout(in seconds) that target sends msgPack + +# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services. +pulsar: + # IP address of Pulsar service. + # Environment variable: PULSAR_ADDRESS + # pulsar.address and pulsar.port together generate the valid access to Pulsar. + # Pulsar preferentially acquires the valid IP address from the environment variable PULSAR_ADDRESS when Milvus is started. + # Default value applies when Pulsar is running on the same network with Milvus. + address: localhost + port: 6650 # Port of Pulsar service. + webport: 80 # Web port of of Pulsar service. If you connect direcly without proxy, should use 8080. + # The maximum size of each message in Pulsar. Unit: Byte. + # By default, Pulsar can transmit at most 5 MB of data in a single message. When the size of inserted data is greater than this value, proxy fragments the data into multiple messages to ensure that they can be transmitted correctly. + # If the corresponding parameter in Pulsar remains unchanged, increasing this configuration will cause Milvus to fail, and reducing it produces no advantage. + maxMessageSize: 5242880 + # Pulsar can be provisioned for specific tenants with appropriate capacity allocated to the tenant. + # To share a Pulsar instance among multiple Milvus instances, you can change this to an Pulsar tenant rather than the default one for each Milvus instance before you start them. However, if you do not want Pulsar multi-tenancy, you are advised to change msgChannel.chanNamePrefix.cluster to the different value. + tenant: public + namespace: default # A Pulsar namespace is the administrative unit nomenclature within a tenant. + requestTimeout: 60 # pulsar client global request timeout in seconds + enableClientMetrics: false # Whether to register pulsar client metrics into milvus metrics path. 
+ +# If you want to enable kafka, needs to comment the pulsar configs +# kafka: +# brokerList: +# saslUsername: +# saslPassword: +# saslMechanisms: +# securityProtocol: +# ssl: +# enabled: false # whether to enable ssl mode +# tlsCert: # path to client's public key (PEM) used for authentication +# tlsKey: # path to client's private key (PEM) used for authentication +# tlsCaCert: # file or directory path to CA certificate(s) for verifying the broker's key +# tlsKeyPassword: # private key passphrase for use with ssl.key.location and set_ssl_cert(), if any +# readTimeout: 10 + +rocksmq: + # Prefix of the key to where Milvus stores data in RocksMQ. + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + # Set an easy-to-identify root key prefix for Milvus if etcd service already exists. + path: /var/lib/milvus/rdb_data + lrucacheratio: 0.06 # rocksdb cache memory ratio + rocksmqPageSize: 67108864 # The maximum size of messages in each page in RocksMQ. Messages in RocksMQ are checked and cleared (when expired) in batch based on this parameters. Unit: Byte. + retentionTimeInMinutes: 4320 # The maximum retention time of acked messages in RocksMQ. Acked messages in RocksMQ are retained for the specified period of time and then cleared. Unit: Minute. + retentionSizeInMB: 8192 # The maximum retention size of acked messages of each topic in RocksMQ. Acked messages in each topic are cleared if their size exceed this parameter. Unit: MB. + compactionInterval: 86400 # Time interval to trigger rocksdb compaction to remove deleted data. Unit: Second + compressionTypes: 0,0,7,7,7 # compaction compression type, only support use 0,7. 0 means not compress, 7 will use zstd. Length of types means num of rocksdb level. + +# natsmq configuration. +# more detail: https://docs.nats.io/running-a-nats-service/configuration +natsmq: + server: + port: 4222 # Listening port of the NATS server. + storeDir: /var/lib/milvus/nats # Directory to use for JetStream storage of nats + maxFileStore: 17179869184 # Maximum size of the 'file' storage + maxPayload: 8388608 # Maximum number of bytes in a message payload + maxPending: 67108864 # Maximum number of bytes buffered for a connection Applies to client connections + initializeTimeout: 4000 # waiting for initialization of natsmq finished + monitor: + trace: false # If true enable protocol trace log messages + debug: false # If true enable debug log messages + logTime: true # If set to false, log without timestamps. + logFile: /tmp/milvus/logs/nats.log # Log file path relative to .. of milvus binary if use relative path + logSizeLimit: 536870912 # Size in bytes after the log file rolls over to a new one + retention: + maxAge: 4320 # Maximum age of any message in the P-channel + maxBytes: # How many bytes the single P-channel may contain. Removing oldest messages if the P-channel exceeds this size + maxMsgs: # How many message the single P-channel may contain. Removing oldest messages if the P-channel exceeds this limit + +# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests +rootCoord: + dmlChannelNum: 16 # The number of DML-Channels to create at the root coord startup. + # The maximum number of partitions in each collection. + # New partitions cannot be created if this parameter is set as 0 or 1. 
+  # Range: [0, INT64MAX]
+  maxPartitionNum: 1024
+  # The minimum row count of a segment required for creating an index.
+  # Segments smaller than this parameter will not be indexed, and will be searched with brute force.
+  minSegmentSizeToEnableIndex: 1024
+  enableActiveStandby: false
+  maxDatabaseNum: 64 # Maximum number of databases
+  maxGeneralCapacity: 65536 # upper limit for the sum of the products of partitionNumber and shardNumber
+  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
+  ip: # TCP/IP address of rootCoord. If not specified, use the first unicastable address
+  port: 53100 # TCP port of rootCoord
+  grpc:
+    serverMaxSendSize: 536870912 # The maximum size of each RPC request that the rootCoord can send, unit: byte
+    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the rootCoord can receive, unit: byte
+    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on rootCoord can send, unit: byte
+    clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on rootCoord can receive, unit: byte
+
+# Related configuration of proxy, used to validate client requests and reduce the returned results.
+proxy:
+  timeTickInterval: 200 # The interval at which proxy synchronizes the time tick, unit: ms.
+  healthCheckTimeout: 3000 # ms, the timeout of the component health check
+  msgStream:
+    timeTick:
+      bufSize: 512 # The maximum number of messages that can be buffered in the timeTick message stream of the proxy when producing messages.
+  maxNameLength: 255 # The maximum length of the name or alias that can be created in Milvus, including the collection name, collection alias, partition name, and field name.
+  maxFieldNum: 64 # The maximum number of fields that can be created in a collection. It is strongly DISCOURAGED to set maxFieldNum >= 64.
+  maxVectorFieldNum: 4 # The maximum number of vector fields that can be specified in a collection. Value range: [1, 10].
+  maxShardNum: 16 # The maximum number of shards that can be created for a collection.
+  maxDimension: 32768 # The maximum number of dimensions a vector can have in a collection.
+  # Whether to produce gin logs.
+  # please adjust in embedded Milvus: false
+  ginLogging: true
+  ginLogSkipPaths: / # skip url path for gin log
+  maxTaskNum: 1024 # The maximum number of tasks in the task queue of the proxy.
+  mustUsePartitionKey: false # switch for whether proxy must use partition key for the collection
+  accessLog:
+    enable: false # Whether to enable the access log feature.
+    minioEnable: false # Whether to upload local access log files to MinIO. This parameter can be specified when proxy.accessLog.filename is not empty.
+    localPath: /tmp/milvus_access # The local folder path where the access log file is stored. This parameter can be specified when proxy.accessLog.filename is not empty.
+    filename: # The name of the access log file. If you leave this parameter empty, access logs will be printed to stdout.
+    maxSize: 64 # The maximum size allowed for a single access log file. If the log file size reaches this limit, a rotation process will be triggered. This process seals the current access log file, creates a new log file, and clears the contents of the original log file. Unit: MB.
+    rotatedTime: 0 # The maximum time interval allowed for rotating a single access log file. Upon reaching the specified time interval, a rotation process is triggered, resulting in the creation of a new access log file and sealing of the previous one. Unit: seconds
+    remotePath: access_log/ # The path of the object storage for uploading access log files.
+    remoteMaxTime: 0 # The time interval allowed for uploading access log files. If the upload time of a log file exceeds this interval, the file will be deleted. Setting the value to 0 disables this feature.
+    formatters:
+      base:
+        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost]"
+      query:
+        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost] [database: $database_name] [collection: $collection_name] [partitions: $partition_name] [expr: $method_expr]"
+        methods: "Query,Search,Delete"
+    cacheSize: 0 # Size of the write cache, in bytes. (A size of 0 disables the write cache)
+    cacheFlushInterval: 3 # time interval of auto flush of the write cache, in seconds. (An interval of 0 disables auto flush)
+  connectionCheckIntervalSeconds: 120 # the interval time(in seconds) for connection manager to scan inactive client info
+  connectionClientInfoTTLSeconds: 86400 # inactive client info TTL duration, in seconds
+  maxConnectionNum: 10000 # the maximum number of client infos that the proxy should manage, to avoid keeping too many
+  gracefulStopTimeout: 30 # seconds. force stop node without graceful stop
+  slowQuerySpanInSeconds: 5 # queries whose execution time exceeds `slowQuerySpanInSeconds` are considered slow, in seconds.
+  queryNodePooling:
+    size: 10 # the size for shardleader(querynode) client pool
+  http:
+    enabled: true # Whether to enable the http server
+    debug_mode: false # Whether to enable http server debug mode
+    port: # high-level restful api
+    acceptTypeAllowInt64: true # high-level restful api, whether http client can deal with int64
+    enablePprof: true # Whether to enable pprof middleware on the metrics port
+  ip: # TCP/IP address of proxy. If not specified, use the first unicastable address
+  port: 19530 # TCP port of proxy
+  internalPort: 19529
+  grpc:
+    serverMaxSendSize: 268435456 # The maximum size of each RPC request that the proxy can send, unit: byte
+    serverMaxRecvSize: 67108864 # The maximum size of each RPC request that the proxy can receive, unit: byte
+    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on proxy can send, unit: byte
+    clientMaxRecvSize: 67108864 # The maximum size of each RPC request that the clients on proxy can receive, unit: byte
+
+# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
+queryCoord:
+  taskMergeCap: 1
+  taskExecutionCap: 256
+  # Switch value to control whether to automatically replace a growing segment with the corresponding indexed sealed segment when the growing segment reaches the sealing threshold.
+  # If this parameter is set to false, Milvus simply searches the growing segments with brute force.
+  autoHandoff: true
+  autoBalance: true # Switch value to control whether to automatically balance the memory usage among query nodes by distributing segment loading and releasing operations evenly.
+ autoBalanceChannel: true # Enable auto balance channel + balancer: ScoreBasedBalancer # auto balancer used for segments on queryNodes + globalRowCountFactor: 0.1 # the weight used when balancing segments among queryNodes + scoreUnbalanceTolerationFactor: 0.05 # the least value for unbalanced extent between from and to nodes when doing balance + reverseUnBalanceTolerationFactor: 1.3 # the largest value for unbalanced extent between from and to nodes after doing balance + overloadedMemoryThresholdPercentage: 90 # The threshold of memory usage (in percentage) in a query node to trigger the sealed segment balancing. + balanceIntervalSeconds: 60 # The interval at which query coord balances the memory usage among query nodes. + memoryUsageMaxDifferencePercentage: 30 # The threshold of memory usage difference (in percentage) between any two query nodes to trigger the sealed segment balancing. + rowCountFactor: 0.4 # the row count weight used when balancing segments among queryNodes + segmentCountFactor: 0.4 # the segment count weight used when balancing segments among queryNodes + globalSegmentCountFactor: 0.1 # the segment count weight used when balancing segments among queryNodes + segmentCountMaxSteps: 50 # segment count based plan generator max steps + rowCountMaxSteps: 50 # segment count based plan generator max steps + randomMaxSteps: 10 # segment count based plan generator max steps + growingRowCountWeight: 4 # the memory weight of growing segment row count + delegatorMemoryOverloadFactor: 0.1 # the factor of delegator overloaded memory + balanceCostThreshold: 0.001 # the threshold of balance cost, if the difference of cluster's cost after executing the balance plan is less than this value, the plan will not be executed + checkSegmentInterval: 1000 + checkChannelInterval: 1000 + checkBalanceInterval: 10000 + checkIndexInterval: 10000 + channelTaskTimeout: 60000 # 1 minute + segmentTaskTimeout: 120000 # 2 minute + distPullInterval: 500 + heartbeatAvailableInterval: 10000 # 10s, Only QueryNodes which fetched heartbeats within the duration are available + loadTimeoutSeconds: 600 + distRequestTimeout: 5000 # the request timeout for querycoord fetching data distribution from querynodes, in milliseconds + heatbeatWarningLag: 5000 # the lag value for querycoord report warning when last heatbeat is too old, in milliseconds + checkHandoffInterval: 5000 + enableActiveStandby: false + checkInterval: 1000 + checkHealthInterval: 3000 # 3s, the interval when query coord try to check health of query node + checkHealthRPCTimeout: 2000 # 100ms, the timeout of check health rpc to query node + brokerTimeout: 5000 # 5000ms, querycoord broker rpc timeout + collectionRecoverTimes: 3 # if collection recover times reach the limit during loading state, release it + observerTaskParallel: 16 # the parallel observer dispatcher task number + checkAutoBalanceConfigInterval: 10 # the interval of check auto balance config + checkNodeSessionInterval: 60 # the interval(in seconds) of check querynode cluster session + gracefulStopTimeout: 5 # seconds. 
force stop node without graceful stop + enableStoppingBalance: true # whether enable stopping balance + channelExclusiveNodeFactor: 4 # the least node number for enable channel's exclusive mode + collectionObserverInterval: 200 # the interval of collection observer + checkExecutedFlagInterval: 100 # the interval of check executed flag to force to pull dist + updateCollectionLoadStatusInterval: 5 # 5m, max interval of updating collection loaded status for check health + cleanExcludeSegmentInterval: 60 # the time duration of clean pipeline exclude segment which used for filter invalid data, in seconds + ip: # TCP/IP address of queryCoord. If not specified, use the first unicastable address + port: 19531 # TCP port of queryCoord + grpc: + serverMaxSendSize: 536870912 # The maximum size of each RPC request that the queryCoord can send, unit: byte + serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the queryCoord can receive, unit: byte + clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on queryCoord can send, unit: byte + clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on queryCoord can receive, unit: byte + +# Related configuration of queryNode, used to run hybrid search between vector and scalar data. +queryNode: + stats: + publishInterval: 1000 # The interval that query node publishes the node statistics information, including segment status, cpu usage, memory usage, health status, etc. Unit: ms. + segcore: + knowhereThreadPoolNumRatio: 4 # The number of threads in knowhere's thread pool. If disk is enabled, the pool size will multiply with knowhereThreadPoolNumRatio([1, 32]). + chunkRows: 128 # Row count by which Segcore divides a segment into chunks. + interimIndex: + # Whether to create a temporary index for growing segments and sealed segments not yet indexed, improving search performance. + # Milvus will eventually seals and indexes all segments, but enabling this optimizes search performance for immediate queries following data insertion. + # This defaults to true, indicating that Milvus creates temporary index for growing segments and the sealed segments that are not indexed upon searches. + enableIndex: true + nlist: 128 # temp index nlist, recommend to set sqrt(chunkRows), must smaller than chunkRows/8 + nprobe: 16 # nprobe to search small index, based on your accuracy requirement, must smaller than nlist + memExpansionRate: 1.15 # extra memory needed by building interim index + buildParallelRate: 0.5 # the ratio of building interim index parallel matched with cpu num + knowhereScoreConsistency: false # Enable knowhere strong consistency score computation logic + loadMemoryUsageFactor: 1 # The multiply factor of calculating the memory usage while loading segments + enableDisk: false # enable querynode load disk index, and search on disk index + maxDiskUsagePercentage: 95 + cache: + memoryLimit: 2147483648 # 2 GB, 2 * 1024 *1024 *1024 + readAheadPolicy: willneed # The read ahead policy of chunk cache, options: `normal, random, sequential, willneed, dontneed` + # options: async, sync, disable. + # Specifies the necessity for warming up the chunk cache. + # 1. If set to "sync" or "async" the original vector data will be synchronously/asynchronously loaded into the + # chunk cache during the load process. This approach has the potential to substantially reduce query/search latency + # for a specific duration post-load, albeit accompanied by a concurrent increase in disk usage; + # 2. 
If set to "disable" original vector data will only be loaded into the chunk cache during search/query. + warmup: disable + mmap: + vectorField: false # Enable mmap for loading vector data + vectorIndex: false # Enable mmap for loading vector index + scalarField: false # Enable mmap for loading scalar data + scalarIndex: false # Enable mmap for loading scalar index + growingMmapEnabled: false # Enable mmap for using in growing raw data + fixedFileSizeForMmapAlloc: 1 # tmp file size for mmap chunk manager + maxDiskUsagePercentageForMmapAlloc: 50 # disk percentage used in mmap chunk manager + lazyload: + enabled: false # Enable lazyload for loading data + waitTimeout: 30000 # max wait timeout duration in milliseconds before start to do lazyload search and retrieve + requestResourceTimeout: 5000 # max timeout in milliseconds for waiting request resource for lazy load, 5s by default + requestResourceRetryInterval: 2000 # retry interval in milliseconds for waiting request resource for lazy load, 2s by default + maxRetryTimes: 1 # max retry times for lazy load, 1 by default + maxEvictPerRetry: 1 # max evict count for lazy load, 1 by default + grouping: + enabled: true + maxNQ: 1000 + topKMergeRatio: 20 + scheduler: + receiveChanSize: 10240 + unsolvedQueueSize: 10240 + # maxReadConcurrentRatio is the concurrency ratio of read task (search task and query task). + # Max read concurrency would be the value of hardware.GetCPUNum * maxReadConcurrentRatio. + # It defaults to 2.0, which means max read concurrency would be the value of hardware.GetCPUNum * 2. + # Max read concurrency must greater than or equal to 1, and less than or equal to hardware.GetCPUNum * 100. + # (0, 100] + maxReadConcurrentRatio: 1 + cpuRatio: 10 # ratio used to estimate read task cpu usage. + maxTimestampLag: 86400 + scheduleReadPolicy: + # fifo: A FIFO queue support the schedule. + # user-task-polling: + # The user's tasks will be polled one by one and scheduled. + # Scheduling is fair on task granularity. + # The policy is based on the username for authentication. + # And an empty username is considered the same user. + # When there are no multi-users, the policy decay into FIFO" + name: fifo + taskQueueExpire: 60 # Control how long (many seconds) that queue retains since queue is empty + enableCrossUserGrouping: false # Enable Cross user grouping when using user-task-polling policy. (Disable it if user's task can not merge each other) + maxPendingTaskPerUser: 1024 # Max pending task per user in scheduler + dataSync: + flowGraph: + maxQueueLength: 16 # The maximum size of task queue cache in flow graph in query node. + maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph + enableSegmentPrune: false # use partition stats to prune data in search/query on shard delegator + queryStreamBatchSize: 4194304 # return batch size of stream query + bloomFilterApplyParallelFactor: 4 # parallel factor when to apply pk to bloom filter, default to 4*CPU_CORE_NUM + workerPooling: + size: 10 # the size for worker querynode client pool + ip: # TCP/IP address of queryNode. 
If not specified, use the first unicastable address + port: 21123 # TCP port of queryNode + grpc: + serverMaxSendSize: 536870912 # The maximum size of each RPC request that the queryNode can send, unit: byte + serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the queryNode can receive, unit: byte + clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on queryNode can send, unit: byte + clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on queryNode can receive, unit: byte + +indexCoord: + bindIndexNodeMode: + enable: false + address: localhost:22930 + withCred: false + nodeID: 0 + segment: + minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the segment num rows is less than this value, the segment will not be indexed + +indexNode: + scheduler: + buildParallel: 1 + enableDisk: true # enable index node build disk vector index + maxDiskUsagePercentage: 95 + ip: # TCP/IP address of indexNode. If not specified, use the first unicastable address + port: 21121 # TCP port of indexNode + grpc: + serverMaxSendSize: 536870912 # The maximum size of each RPC request that the indexNode can send, unit: byte + serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the indexNode can receive, unit: byte + clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on indexNode can send, unit: byte + clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on indexNode can receive, unit: byte + +dataCoord: + channel: + watchTimeoutInterval: 300 # Timeout on watching channels (in seconds). Datanode tickler update watch progress will reset timeout timer. + legacyVersionWithoutRPCWatch: 2.4.1 # Datanodes <= this version are considered as legacy nodes, which doesn't have rpc based watch(). This is only used during rolling upgrade where legacy nodes won't get new channels + balanceSilentDuration: 300 # The duration after which the channel manager start background channel balancing + balanceInterval: 360 # The interval with which the channel manager check dml channel balance status + checkInterval: 1 # The interval in seconds with which the channel manager advances channel states + notifyChannelOperationTimeout: 5 # Timeout notifing channel operations (in seconds). + segment: + maxSize: 1024 # The maximum size of a segment, unit: MB. datacoord.segment.maxSize and datacoord.segment.sealProportion together determine if a segment can be sealed. + diskSegmentMaxSize: 2048 # Maximun size of a segment in MB for collection which has Disk index + sealProportion: 0.12 # The minimum proportion to datacoord.segment.maxSize to seal a segment. datacoord.segment.maxSize and datacoord.segment.sealProportion together determine if a segment can be sealed. + sealProportionJitter: 0.1 # segment seal proportion jitter ratio, default value 0.1(10%), if seal proportion is 12%, with jitter=0.1, the actuall applied ratio will be 10.8~12% + assignmentExpiration: 2000 # Expiration time of the segment assignment, unit: ms + allocLatestExpireAttempt: 200 # The time attempting to alloc latest lastExpire from rootCoord after restart + maxLife: 86400 # The max lifetime of segment in seconds, 24*60*60 + # If a segment didn't accept dml records in maxIdleTime and the size of segment is greater than + # minSizeFromIdleToSealed, Milvus will automatically seal it. + # The max idle time of segment in seconds, 10*60. 
+ maxIdleTime: 600 + minSizeFromIdleToSealed: 16 # The min size in MB of segment which can be idle from sealed. + # The max number of binlog file for one segment, the segment will be sealed if + # the number of binlog file reaches to max value. + maxBinlogFileNumber: 32 + smallProportion: 0.5 # The segment is considered as "small segment" when its # of rows is smaller than + # (smallProportion * segment max # of rows). + # A compaction will happen on small segments if the segment after compaction will have + compactableProportion: 0.85 + # over (compactableProportion * segment max # of rows) rows. + # MUST BE GREATER THAN OR EQUAL TO !!! + # During compaction, the size of segment # of rows is able to exceed segment max # of rows by (expansionRate-1) * 100%. + expansionRate: 1.25 + sealPolicy: + channel: + # The size threshold in MB, if the total size of growing segments of each shard + # exceeds this threshold, the largest growing segment will be sealed. + growingSegmentsMemSize: 4096 + autoUpgradeSegmentIndex: false # whether auto upgrade segment index to index engine's version + segmentFlushInterval: 2 # the minimal interval duration(unit: Seconds) between flusing operation on same segment + # Switch value to control if to enable segment compaction. + # Compaction merges small-size segments into a large segment, and clears the entities deleted beyond the rentention duration of Time Travel. + enableCompaction: true + compaction: + # Switch value to control if to enable automatic segment compaction during which data coord locates and merges compactable segments in the background. + # This configuration takes effect only when dataCoord.enableCompaction is set as true. + enableAutoCompaction: true + indexBasedCompaction: true + rpcTimeout: 10 + maxParallelTaskNum: 10 + workerMaxParallelTaskNum: 2 + dropTolerance: 86400 # Compaction task will be cleaned after finish longer than this time(in seconds) + gcInterval: 1800 # The time interval in seconds for compaction gc + clustering: + enable: true # Enable clustering compaction + autoEnable: false # Enable auto clustering compaction + triggerInterval: 600 # clustering compaction trigger interval in seconds + minInterval: 3600 # The minimum interval between clustering compaction executions of one collection, to avoid redundant compaction + maxInterval: 259200 # If a collection haven't been clustering compacted for longer than maxInterval, force compact + newDataSizeThreshold: 512m # If new data size is large than newDataSizeThreshold, execute clustering compaction + preferSegmentSizeRatio: 0.8 + maxSegmentSizeRatio: 1 + maxTrainSizeRatio: 0.8 # max data size ratio in Kmeans train, if larger than it, will down sampling to meet this limit + maxCentroidsNum: 10240 # maximum centroids number in Kmeans train + minCentroidsNum: 16 # minimum centroids number in Kmeans train + minClusterSizeRatio: 0.01 # minimum cluster size / avg size in Kmeans train + maxClusterSizeRatio: 10 # maximum cluster size / avg size in Kmeans train + maxClusterSize: 5g # maximum cluster size in Kmeans train + levelzero: + forceTrigger: + minSize: 8388608 # The minmum size in bytes to force trigger a LevelZero Compaction, default as 8MB + maxSize: 67108864 # The maxmum size in bytes to force trigger a LevelZero Compaction, default as 64MB + deltalogMinNum: 10 # The minimum number of deltalog files to force trigger a LevelZero Compaction + deltalogMaxNum: 30 # The maxmum number of deltalog files to force trigger a LevelZero Compaction, default as 30 + syncSegmentsInterval: 300 # 
The time interval for regularly syncing segments + enableGarbageCollection: true # Switch value to control if to enable garbage collection to clear the discarded data in MinIO or S3 service. + gc: + interval: 3600 # The interval at which data coord performs garbage collection, unit: second. + missingTolerance: 86400 # The retention duration of the unrecorded binary log (binlog) files. Setting a reasonably large value for this parameter avoids erroneously deleting the newly created binlog files that lack metadata. Unit: second. + dropTolerance: 10800 # The retention duration of the binlog files of the deleted segments before they are cleared, unit: second. + removeConcurrent: 32 # number of concurrent goroutines to remove dropped s3 objects + scanInterval: 168 # orphan file (file on oss but has not been registered on meta) on object storage garbage collection scanning interval in hours + enableActiveStandby: false + brokerTimeout: 5000 # 5000ms, dataCoord broker rpc timeout + autoBalance: true # Enable auto balance + checkAutoBalanceConfigInterval: 10 # the interval of check auto balance config + import: + filesPerPreImportTask: 2 # The maximum number of files allowed per pre-import task. + taskRetention: 10800 # The retention period in seconds for tasks in the Completed or Failed state. + maxSizeInMBPerImportTask: 6144 # To prevent generating of small segments, we will re-group imported files. This parameter represents the sum of file sizes in each group (each ImportTask). + scheduleInterval: 2 # The interval for scheduling import, measured in seconds. + checkIntervalHigh: 2 # The interval for checking import, measured in seconds, is set to a high frequency for the import checker. + checkIntervalLow: 120 # The interval for checking import, measured in seconds, is set to a low frequency for the import checker. + maxImportFileNumPerReq: 1024 # The maximum number of files allowed per single import request. + waitForIndex: true # Indicates whether the import operation waits for the completion of index building. + gracefulStopTimeout: 5 # seconds. force stop node without graceful stop + slot: + clusteringCompactionUsage: 16 # slot usage of clustering compaction job. + mixCompactionUsage: 8 # slot usage of mix compaction job. + l0DeleteCompactionUsage: 8 # slot usage of l0 compaction job. + ip: # TCP/IP address of dataCoord. If not specified, use the first unicastable address + port: 13333 # TCP port of dataCoord + grpc: + serverMaxSendSize: 536870912 # The maximum size of each RPC request that the dataCoord can send, unit: byte + serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the dataCoord can receive, unit: byte + clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on dataCoord can send, unit: byte + clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on dataCoord can receive, unit: byte + +dataNode: + dataSync: + flowGraph: + maxQueueLength: 16 # Maximum length of task queue in flowgraph + maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph + maxParallelSyncMgrTasks: 256 # The max concurrent sync task number of datanode sync mgr globally + skipMode: + enable: true # Support skip some timetick message to reduce CPU usage + skipNum: 4 # Consume one for every n records skipped + coldTime: 60 # Turn on skip mode after there are only timetick msg for x seconds + segment: + # The maximum size of each binlog file in a segment buffered in memory. 
Binlog files whose size exceeds this value are then flushed to MinIO or S3 service. + # Unit: Byte + # Setting this parameter too small causes the system to store a small amount of data too frequently. Setting it too large increases the system's demand for memory. + insertBufSize: 16777216 + deleteBufBytes: 16777216 # Max buffer size in bytes to flush del for a single channel, default as 16MB + syncPeriod: 600 # The period to sync segments if buffer is not empty. + memory: + forceSyncEnable: true # Set true to force sync if memory usage is too high + forceSyncSegmentNum: 1 # number of segments to sync, segments with top largest buffer will be synced. + checkInterval: 3000 # the interal to check datanode memory usage, in milliseconds + forceSyncWatermark: 0.5 # memory watermark for standalone, upon reaching this watermark, segments will be synced. + timetick: + interval: 500 + channel: + # specify the size of global work pool of all channels + # if this parameter <= 0, will set it as the maximum number of CPUs that can be executing + # suggest to set it bigger on large collection numbers to avoid blocking + workPoolSize: -1 + # specify the size of global work pool for channel checkpoint updating + # if this parameter <= 0, will set it as 10 + updateChannelCheckpointMaxParallel: 10 + updateChannelCheckpointInterval: 60 # the interval duration(in seconds) for datanode to update channel checkpoint of each channel + updateChannelCheckpointRPCTimeout: 20 # timeout in seconds for UpdateChannelCheckpoint RPC call + maxChannelCheckpointsPerPRC: 128 # The maximum number of channel checkpoints per UpdateChannelCheckpoint RPC. + channelCheckpointUpdateTickInSeconds: 10 # The frequency, in seconds, at which the channel checkpoint updater executes updates. + import: + maxConcurrentTaskNum: 16 # The maximum number of import/pre-import tasks allowed to run concurrently on a datanode. + maxImportFileSizeInGB: 16 # The maximum file size (in GB) for an import file, where an import file refers to either a Row-Based file or a set of Column-Based files. + readBufferSizeInMB: 16 # The data block size (in MB) read from chunk manager by the datanode during import. + compaction: + levelZeroBatchMemoryRatio: 0.05 # The minimal memory ratio of free memory for level zero compaction executing in batch mode + levelZeroMaxBatchSize: -1 # Max batch size refers to the max number of L1/L2 segments in a batch when executing L0 compaction. Default to -1, any value that is less than 1 means no limit. Valid range: >= 1. + gracefulStopTimeout: 1800 # seconds. force stop node without graceful stop + slot: + slotCap: 16 # The maximum number of tasks(e.g. compaction, importing) allowed to run concurrently on a datanode + clusteringCompaction: + memoryBufferRatio: 0.1 # The ratio of memory buffer of clustering compaction. Data larger than threshold will be flushed to storage. + workPoolSize: 8 # worker pool size for one clustering compaction job. + bloomFilterApplyParallelFactor: 4 # parallel factor when to apply pk to bloom filter, default to 4*CPU_CORE_NUM + storage: + deltalog: json # deltalog format, options: [json, parquet] + ip: # TCP/IP address of dataNode. 
If not specified, use the first unicastable address + port: 21124 # TCP port of dataNode + grpc: + serverMaxSendSize: 536870912 # The maximum size of each RPC request that the dataNode can send, unit: byte + serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the dataNode can receive, unit: byte + clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on dataNode can send, unit: byte + clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on dataNode can receive, unit: byte + +# This topic introduces the message channel-related configurations of Milvus. +msgChannel: + chanNamePrefix: + # Root name prefix of the channel when a message channel is created. + # It is recommended to change this parameter before starting Milvus for the first time. + # To share a Pulsar instance among multiple Milvus instances, consider changing this to a name rather than the default one for each Milvus instance before you start them. + cluster: by-dev + # Sub-name prefix of the message channel where the root coord publishes time tick messages. + # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.rootCoordTimeTick} + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + rootCoordTimeTick: rootcoord-timetick + # Sub-name prefix of the message channel where the root coord publishes its own statistics messages. + # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.rootCoordStatistics} + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + rootCoordStatistics: rootcoord-statistics + # Sub-name prefix of the message channel where the root coord publishes Data Manipulation Language (DML) messages. + # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.rootCoordDml} + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + rootCoordDml: rootcoord-dml + replicateMsg: replicate-msg + # Sub-name prefix of the message channel where the query node publishes time tick messages. + # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.queryTimeTick} + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + queryTimeTick: queryTimeTick + # Sub-name prefix of the message channel where the data coord publishes time tick messages. + # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.dataCoordTimeTick} + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + dataCoordTimeTick: datacoord-timetick-channel + # Sub-name prefix of the message channel where the data coord publishes segment information messages. 
+ # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.dataCoordSegmentInfo} + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + dataCoordSegmentInfo: segment-info-channel + subNamePrefix: + # Subscription name prefix of the data coord. + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + dataCoordSubNamePrefix: dataCoord + # Subscription name prefix of the data node. + # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data. + # It is recommended to change this parameter before starting Milvus for the first time. + dataNodeSubNamePrefix: dataNode + +# Configures the system log output. +log: + # Milvus log level. Option: debug, info, warn, error, panic, and fatal. + # It is recommended to use debug level under test and development environments, and info level in production environment. + level: info + file: + # Root path to the log files. + # The default value is set empty, indicating to output log files to standard output (stdout) and standard error (stderr). + # If this parameter is set to a valid local path, Milvus writes and stores log files in this path. + # Set this parameter as the path that you have permission to write. + rootPath: + maxSize: 300 # The maximum size of a log file, unit: MB. + maxAge: 10 # The maximum retention time before a log file is automatically cleared, unit: day. The minimum value is 1. + maxBackups: 20 # The maximum number of log files to back up, unit: day. The minimum value is 1. + format: text # Milvus log format. Option: text and JSON + stdout: true # Stdout enable or not + +grpc: + log: + level: WARNING + gracefulStopTimeout: 10 # second, time to wait graceful stop finish + client: + compressionEnabled: false + dialTimeout: 200 + keepAliveTime: 10000 + keepAliveTimeout: 20000 + maxMaxAttempts: 10 + initialBackoff: 0.2 + maxBackoff: 10 + backoffMultiplier: 2 + minResetInterval: 1000 + maxCancelError: 32 + minSessionCheckInterval: 200 + +# Configure the proxy tls enable. 
+tls:
+  serverPemPath: configs/cert/server.pem
+  serverKeyPath: configs/cert/server.key
+  caPemPath: configs/cert/ca.pem
+
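+# internaltls secures the gRPC links between Milvus's own components
+# (coordinators and nodes), independently of the client-facing `tls` block
+# above. The cert1/ paths point at the sample certificates added by this
+# patch. internalAdd (presumably the advertised internal address) is the
+# address peers dial, taken here from the HOSTNAME environment variable.
+# These settings only take effect when common.security.internaltlsEnabled
+# is true (see the common section below).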
+internaltls:
+  serverPemPath: configs/cert1/server.pem
+  serverKeyPath: configs/cert1/server.key
+  caPemPath: configs/cert1/ca.pem
+  internalAdd: ${HOSTNAME}
+
+common:
+  defaultPartitionName: _default # Name of the default partition when a collection is created
+  defaultIndexName: _default_idx # Name of the index when it is created with name unspecified
+  entityExpiration: -1 # Entity expiration in seconds, CAUTION -1 means never expire
+  indexSliceSize: 16 # Index slice size in MB
+  threadCoreCoefficient:
+    highPriority: 10 # This parameter specifies how many times the number of threads is the number of cores in the high priority pool
+    middlePriority: 5 # This parameter specifies how many times the number of threads is the number of cores in the middle priority pool
+    lowPriority: 1 # This parameter specifies how many times the number of threads is the number of cores in the low priority pool
+  buildIndexThreadPoolRatio: 0.75
+  DiskIndex:
+    MaxDegree: 56
+    SearchListSize: 100
+    PQCodeBudgetGBRatio: 0.125
+    BuildNumThreadsRatio: 1
+    SearchCacheBudgetGBRatio: 0.1
+    LoadNumThreadRatio: 8
+    BeamWidthRatio: 4
+  gracefulTime: 5000 # milliseconds. it represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency.
+  gracefulStopTimeout: 1800 # seconds. it will force quit the server if the graceful stop process is not completed during this time.
+  bitmapIndexCardinalityBound: 500
+  storageType: remote # please adjust in embedded Milvus: local, available values are [local, remote, opendal], value minio is deprecated, use remote instead
+  # Default value: auto
+  # Valid values: [auto, avx512, avx2, avx, sse4_2]
+  # This configuration is only used by querynode and indexnode, it selects CPU instruction set for Searching and Index-building.
+  simdType: auto
+  security:
+    authorizationEnabled: false
+    # The superusers will ignore some system check processes,
+    # like the old password verification when updating the credential
+    superUsers:
+    defaultRootPassword: Milvus # default password for root user
+    tlsMode: 0
+    internaltlsEnabled: true
+  session:
+    ttl: 30 # ttl value when session granting a lease to register service
+    retryTimes: 30 # retry times when session sending etcd requests
+  locks:
+    metrics:
+      enable: false # whether gather statistics for metrics locks
+    threshold:
+      info: 500 # minimum milliseconds for printing durations in info level
+      warn: 1000 # minimum milliseconds for printing durations in warn level
+  storage:
+    scheme: s3
+    enablev2: false
+  # Whether to disable the internal time messaging mechanism for the system.
+  # If disabled (set to false), the system will not allow DML operations, including insertion, deletion, queries, and searches.
+  # This helps Milvus-CDC synchronize incremental data.
+  ttMsgEnabled: true
+  traceLogMode: 0 # trace request info
+  bloomFilterSize: 100000 # bloom filter initial size
+  bloomFilterType: BlockedBloomFilter # bloom filter type, support BasicBloomFilter and BlockedBloomFilter
+  maxBloomFalsePositive: 0.001 # max false positive rate for bloom filter
+  bloomFilterApplyBatchSize: 1000 # batch size when to apply pk to bloom filter
+  usePartitionKeyAsClusteringKey: false # if true, do clustering compaction and segment prune on partition key field
+  useVectorAsClusteringKey: false # if true, do clustering compaction and segment prune on vector field
+  enableVectorClusteringKey: false # if true, enable vector clustering key and vector clustering compaction
+
+# QuotaConfig, configurations of Milvus quota and limits.
+# By default, we enable:
+# 1. TT protection;
+# 2. Memory protection;
+# 3. Disk quota protection.
+# You can enable:
+# 1. DML throughput limitation;
+# 2. DDL, DQL qps/rps limitation;
+# 3. DQL Queue length/latency protection;
+# 4. DQL result rate protection.
+# If necessary, you can also manually force to deny RW requests.
+quotaAndLimits:
+  enabled: true # `true` to enable quota and limits, `false` to disable.
+  # quotaCenterCollectInterval is the time interval that quotaCenter
+  # collects metrics from Proxies, Query cluster and Data cluster.
+  # seconds, (0 ~ 65536)
+  quotaCenterCollectInterval: 3
+  limits:
+    allocRetryTimes: 15 # retry times when delete alloc forward data from rate limit failed
+    allocWaitInterval: 1000 # retry wait duration when delete alloc forward data rate failed, in milliseconds
+    complexDeleteLimitEnable: false # whether complex delete check forward data by limiter
+    maxCollectionNum: 65536
+    maxCollectionNumPerDB: 65536 # Maximum number of collections per database.
+    maxInsertSize: -1 # maximum size of a single insert request, in bytes, -1 means no limit
+    maxResourceGroupNumOfQueryNode: 1024 # maximum number of resource groups of query nodes
+  ddl:
+    enabled: false # Whether DDL request throttling is enabled.
+    # Maximum number of collection-related DDL requests per second.
+    # Setting this item to 10 indicates that Milvus processes no more than 10 collection-related DDL requests per second, including collection creation requests, collection drop requests, collection load requests, and collection release requests.
+    # To use this setting, set quotaAndLimits.ddl.enabled to true at the same time.
+    collectionRate: -1
+    # Maximum number of partition-related DDL requests per second.
+    # Setting this item to 10 indicates that Milvus processes no more than 10 partition-related requests per second, including partition creation requests, partition drop requests, partition load requests, and partition release requests.
+    # To use this setting, set quotaAndLimits.ddl.enabled to true at the same time.
+    partitionRate: -1
+    db:
+      collectionRate: -1 # qps of db level, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
+      partitionRate: -1 # qps of db level, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
+  indexRate:
+    enabled: false # Whether index-related request throttling is enabled.
+    # Maximum number of index-related requests per second.
+    # Setting this item to 10 indicates that Milvus processes no more than 10 index-related requests per second, including index creation requests and index drop requests.
+ # To use this setting, set quotaAndLimits.indexRate.enabled to true at the same time. + max: -1 + db: + max: -1 # qps of db level, default no limit, rate for CreateIndex, DropIndex + flushRate: + enabled: true # Whether flush request throttling is enabled. + # Maximum number of flush requests per second. + # Setting this item to 10 indicates that Milvus processes no more than 10 flush requests per second. + # To use this setting, set quotaAndLimits.flushRate.enabled to true at the same time. + max: -1 + collection: + max: 0.1 # qps, default no limit, rate for flush at collection level. + db: + max: -1 # qps of db level, default no limit, rate for flush + compactionRate: + enabled: false # Whether manual compaction request throttling is enabled. + # Maximum number of manual-compaction requests per second. + # Setting this item to 10 indicates that Milvus processes no more than 10 manual-compaction requests per second. + # To use this setting, set quotaAndLimits.compaction.enabled to true at the same time. + max: -1 + db: + max: -1 # qps of db level, default no limit, rate for manualCompaction + dml: + enabled: false # Whether DML request throttling is enabled. + insertRate: + # Highest data insertion rate per second. + # Setting this item to 5 indicates that Milvus only allows data insertion at the rate of 5 MB/s. + # To use this setting, set quotaAndLimits.dml.enabled to true at the same time. + max: -1 + db: + max: -1 # MB/s, default no limit + collection: + # Highest data insertion rate per collection per second. + # Setting this item to 5 indicates that Milvus only allows data insertion to any collection at the rate of 5 MB/s. + # To use this setting, set quotaAndLimits.dml.enabled to true at the same time. + max: -1 + partition: + max: -1 # MB/s, default no limit + upsertRate: + max: -1 # MB/s, default no limit + db: + max: -1 # MB/s, default no limit + collection: + max: -1 # MB/s, default no limit + partition: + max: -1 # MB/s, default no limit + deleteRate: + # Highest data deletion rate per second. + # Setting this item to 0.1 indicates that Milvus only allows data deletion at the rate of 0.1 MB/s. + # To use this setting, set quotaAndLimits.dml.enabled to true at the same time. + max: -1 + db: + max: -1 # MB/s, default no limit + collection: + # Highest data deletion rate per second. + # Setting this item to 0.1 indicates that Milvus only allows data deletion from any collection at the rate of 0.1 MB/s. + # To use this setting, set quotaAndLimits.dml.enabled to true at the same time. + max: -1 + partition: + max: -1 # MB/s, default no limit + bulkLoadRate: + max: -1 # MB/s, default no limit, not support yet. TODO: limit bulkLoad rate + db: + max: -1 # MB/s, default no limit, not support yet. TODO: limit db bulkLoad rate + collection: + max: -1 # MB/s, default no limit, not support yet. TODO: limit collection bulkLoad rate + partition: + max: -1 # MB/s, default no limit, not support yet. TODO: limit partition bulkLoad rate + dql: + enabled: false # Whether DQL request throttling is enabled. + searchRate: + # Maximum number of vectors to search per second. + # Setting this item to 100 indicates that Milvus only allows searching 100 vectors per second no matter whether these 100 vectors are all in one search or scattered across multiple searches. + # To use this setting, set quotaAndLimits.dql.enabled to true at the same time. + max: -1 + db: + max: -1 # vps (vectors per second), default no limit + collection: + # Maximum number of vectors to search per collection per second. 
+        # Setting this item to 100 indicates that Milvus only allows searching 100 vectors per second per collection no matter whether these 100 vectors are all in one search or scattered across multiple searches.
+        # To use this setting, set quotaAndLimits.dql.enabled to true at the same time.
+        max: -1
+      partition:
+        max: -1 # vps (vectors per second), default no limit
+    queryRate:
+      # Maximum number of queries per second.
+      # Setting this item to 100 indicates that Milvus only allows 100 queries per second.
+      # To use this setting, set quotaAndLimits.dql.enabled to true at the same time.
+      max: -1
+      db:
+        max: -1 # qps, default no limit
+      collection:
+        # Maximum number of queries per collection per second.
+        # Setting this item to 100 indicates that Milvus only allows 100 queries per collection per second.
+        # To use this setting, set quotaAndLimits.dql.enabled to true at the same time.
+        max: -1
+      partition:
+        max: -1 # qps, default no limit
+  limitWriting:
+    # forceDeny false means dml requests are allowed (except under some
+    # specific conditions, such as node memory reaching the watermark), true means always reject all dml requests.
+    forceDeny: false
+    ttProtection:
+      enabled: false
+      # maxTimeTickDelay indicates the backpressure for DML Operations.
+      # DML rates would be reduced according to the ratio of time tick delay to maxTimeTickDelay,
+      # if time tick delay is greater than maxTimeTickDelay, all DML requests would be rejected.
+      # seconds
+      maxTimeTickDelay: 300
+    memProtection:
+      # When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
+      # When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
+      # When memory usage < memoryLowWaterLevel, no action.
+      enabled: true
+      dataNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in DataNodes
+      dataNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in DataNodes
+      queryNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in QueryNodes
+      queryNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in QueryNodes
+    growingSegmentsSizeProtection:
+      # No action will be taken if the growing segments size is less than the low watermark.
+      # When the growing segments size exceeds the low watermark, the dml rate will be reduced,
+      # but the rate will not be lower than minRateRatio * dmlRate.
+      enabled: false
+      minRateRatio: 0.5
+      lowWaterLevel: 0.2
+      highWaterLevel: 0.4
+    diskProtection:
+      enabled: true # When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;
+      diskQuota: -1 # MB, (0, +inf), default no limit
+      diskQuotaPerDB: -1 # MB, (0, +inf), default no limit
+      diskQuotaPerCollection: -1 # MB, (0, +inf), default no limit
+      diskQuotaPerPartition: -1 # MB, (0, +inf), default no limit
+    l0SegmentsRowCountProtection:
+      enabled: false # switch to enable l0 segment row count quota
+      lowWaterLevel: 32768 # l0 segment row count quota, low water level
+      highWaterLevel: 65536 # l0 segment row count quota, high water level
+  limitReading:
+    # forceDeny false means dql requests are allowed (except for some
+    # specific conditions, such as collection has been dropped), true means always reject all dql requests.
+    forceDeny: false
+    queueProtection:
+      enabled: false
+      # nqInQueueThreshold indicates that the system is under backpressure for the Search/Query path.
+ # If NQ in any QueryNode's queue is greater than nqInQueueThreshold, search&query rates would gradually cool off + # until the NQ in queue no longer exceeds nqInQueueThreshold. We think of the NQ of query request as 1. + # int, default no limit + nqInQueueThreshold: -1 + # queueLatencyThreshold indicated that the system was under backpressure for Search/Query path. + # If dql latency of queuing is greater than queueLatencyThreshold, search&query rates would gradually cool off + # until the latency of queuing no longer exceeds queueLatencyThreshold. + # The latency here refers to the averaged latency over a period of time. + # milliseconds, default no limit + queueLatencyThreshold: -1 + resultProtection: + enabled: false + # maxReadResultRate indicated that the system was under backpressure for Search/Query path. + # If dql result rate is greater than maxReadResultRate, search&query rates would gradually cool off + # until the read result rate no longer exceeds maxReadResultRate. + # MB/s, default no limit + maxReadResultRate: -1 + maxReadResultRatePerDB: -1 + maxReadResultRatePerCollection: -1 + # colOffSpeed is the speed of search&query rates cool off. + # (0, 1] + coolOffSpeed: 0.9 + +trace: + # trace exporter type, default is stdout, + # optional values: ['noop','stdout', 'jaeger', 'otlp'] + exporter: noop + # fraction of traceID based sampler, + # optional values: [0, 1] + # Fractions >= 1 will always sample. Fractions < 0 are treated as zero. + sampleFraction: 0 + jaeger: + url: # when exporter is jaeger should set the jaeger's URL + otlp: + endpoint: # example: "127.0.0.1:4317" for grpc, "127.0.0.1:4318" for http + method: # otlp export method, acceptable values: ["grpc", "http"], using "grpc" by default + secure: true + initTimeoutSeconds: 10 # segcore initialization timeout in seconds, preventing otlp grpc hangs forever + +#when using GPU indexing, Milvus will utilize a memory pool to avoid frequent memory allocation and deallocation. +#here, you can set the size of the memory occupied by the memory pool, with the unit being MB. +#note that there is a possibility of Milvus crashing when the actual memory demand exceeds the value set by maxMemSize. +#if initMemSize and MaxMemSize both set zero, +#milvus will automatically initialize half of the available GPU memory, +#maxMemSize will the whole available GPU memory. +gpu: + initMemSize: 2048 # Gpu Memory Pool init size + maxMemSize: 4096 # Gpu Memory Pool Max size + +# Any configuration related to the streaming node server. +streamingNode: + ip: # TCP/IP address of streamingNode. 
If not specified, use the first unicastable address + port: 22222 # TCP port of streamingNode + grpc: + serverMaxSendSize: 268435456 # The maximum size of each RPC request that the streamingNode can send, unit: byte + serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the streamingNode can receive, unit: byte + clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on streamingNode can send, unit: byte + clientMaxRecvSize: 268435456 # The maximum size of each RPC request that the clients on streamingNode can receive, unit: byte diff --git a/internal/distributed/datacoord/client/client.go b/internal/distributed/datacoord/client/client.go index df5fecb1af4f6..92d0e91193268 100644 --- a/internal/distributed/datacoord/client/client.go +++ b/internal/distributed/datacoord/client/client.go @@ -26,6 +26,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" "github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/indexpb" "github.com/milvus-io/milvus/internal/proto/internalpb" @@ -70,7 +71,15 @@ func NewClient(ctx context.Context) (types.DataCoordClient, error) { client.grpcClient.SetGetAddrFunc(client.getDataCoordAddr) client.grpcClient.SetNewGrpcClientFunc(client.newGrpcClient) client.grpcClient.SetSession(sess) - + if config.InternalTLSEnabled.GetAsBool() { + client.grpcClient.EnableEncryption() + cp, err := utils.CreateCertPoolforClient(Params.DataCoordGrpcClientCfg.InternalTLSCaPemPath.GetValue(), "Datacoord") + if err != nil { + log.Error("Failed to create cert pool for Datacoord client") + return nil, err + } + client.grpcClient.SetInternalTLSCertPool(cp) + } return client, nil } diff --git a/internal/distributed/datacoord/client/client_test.go b/internal/distributed/datacoord/client/client_test.go index c46dab7235126..796d86884ba79 100644 --- a/internal/distributed/datacoord/client/client_test.go +++ b/internal/distributed/datacoord/client/client_test.go @@ -25,12 +25,9 @@ import ( "time" "github.com/cockroachdb/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "go.uber.org/zap" - "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" "github.com/milvus-io/milvus/internal/mocks" "github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/indexpb" @@ -39,6 +36,10 @@ import ( "github.com/milvus-io/milvus/pkg/util/etcd" "github.com/milvus-io/milvus/pkg/util/merr" "github.com/milvus-io/milvus/pkg/util/paramtable" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + testify_mock "github.com/stretchr/testify/mock" + "go.uber.org/zap" ) var mockErr = errors.New("mock grpc err") @@ -72,6 +73,84 @@ func Test_NewClient(t *testing.T) { assert.NoError(t, err) } +func Test_InternalTLS(t *testing.T) { + paramtable.Init() + validPath := "../../../../configs/cert1/ca.pem" + + ctx := context.Background() + client, err := NewClient(ctx) + assert.NoError(t, err) + assert.NotNil(t, client) + defer client.Close() + + mockDC := mocks.NewMockDataCoordClient(t) + mockGrpcClient := mocks.NewMockGrpcClient[datapb.DataCoordClient](t) + + // Set mock expectations + mockGrpcClient.EXPECT().Close().Return(nil) + mockGrpcClient.EXPECT().GetNodeID().Return(1) + 
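+	// ReCall is the retry wrapper that every client RPC goes through; routing
+	// it straight to the in-process mock keeps these sub-tests from dialing a
+	// real TLS endpoint.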
+	mockGrpcClient.EXPECT().ReCall(testify_mock.Anything, testify_mock.Anything).RunAndReturn(func(ctx context.Context, f func(datapb.DataCoordClient) (interface{}, error)) (interface{}, error) {
+		return f(mockDC)
+	})
+	// Sub-test for nil cert pool
+	t.Run("NoCertPool", func(t *testing.T) {
+		var ErrNoCertPool = errors.New("no cert pool")
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(testify_mock.Anything).Return().Once()
+		client.(*Client).grpcClient = mockGrpcClient
+		client.(*Client).grpcClient.SetInternalTLSCertPool(nil) // Simulate no cert pool
+
+		mockDC.EXPECT().Flush(testify_mock.Anything, testify_mock.Anything).Return(nil, ErrNoCertPool)
+
+		_, err := client.Flush(ctx, &datapb.FlushRequest{})
+		assert.Error(t, err)                // Check for an error
+		assert.Equal(t, ErrNoCertPool, err) // Check that it's the expected error
+	})
+
+	// Sub-test for invalid certificate path
+	t.Run("InvalidCertPath", func(t *testing.T) {
+		invalidCAPath := "invalid/path/to/ca.pem"
+		cp, err := utils.CreateCertPoolforClient(invalidCAPath, "datacoord")
+		assert.NotNil(t, err) // Expect an error while creating cert pool
+		assert.Nil(t, cp)     // Cert pool should be nil
+	})
+
+	// Sub-test for TLS handshake failure
+	t.Run("TlsHandshakeFailed", func(t *testing.T) {
+		cp, err := utils.CreateCertPoolforClient(validPath, "datacoord")
+		assert.Nil(t, err)
+
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once()
+
+		mockDC.ExpectedCalls = nil
+		mockDC.EXPECT().Flush(mock.Anything, mock.Anything).Return(nil, errors.New("TLS handshake failed"))
+
+		client.(*Client).grpcClient = mockGrpcClient
+		client.(*Client).grpcClient.SetInternalTLSCertPool(cp)
+
+		_, err = client.Flush(ctx, &datapb.FlushRequest{})
+		assert.NotNil(t, err)
+		assert.EqualError(t, err, "TLS handshake failed")
+	})
+
+	// Sub-test for a flush that succeeds once the cert pool is in place
+	t.Run("SuccessfulFlush", func(t *testing.T) {
+		cp, err := utils.CreateCertPoolforClient(validPath, "datacoord")
+		assert.NoError(t, err)
+		assert.NotNil(t, cp)
+
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once()
+		client.(*Client).grpcClient = mockGrpcClient
+		client.(*Client).grpcClient.SetInternalTLSCertPool(cp)
+
+		mockDC.ExpectedCalls = nil
+		mockDC.EXPECT().Flush(mock.Anything, mock.Anything).Return(&datapb.FlushResponse{
+			Status: merr.Success(),
+		}, nil)
+
+		resp, err := client.Flush(ctx, &datapb.FlushRequest{})
+		assert.NoError(t, err)
+		assert.NotNil(t, resp)
+	})
+}
+
 func Test_GetComponentStates(t *testing.T) {
 	paramtable.Init()
diff --git a/internal/distributed/datacoord/service.go b/internal/distributed/datacoord/service.go
index 998bb21106ab7..7748de14c1369 100644
--- a/internal/distributed/datacoord/service.go
+++ b/internal/distributed/datacoord/service.go
@@ -43,7 +43,6 @@ import (
 	"github.com/milvus-io/milvus/internal/util/streamingutil"
 	streamingserviceinterceptor "github.com/milvus-io/milvus/internal/util/streamingutil/service/interceptor"
 	"github.com/milvus-io/milvus/pkg/log"
-	"github.com/milvus-io/milvus/pkg/tracer"
 	"github.com/milvus-io/milvus/pkg/util"
 	"github.com/milvus-io/milvus/pkg/util/etcd"
 	"github.com/milvus-io/milvus/pkg/util/funcutil"
@@ -174,7 +173,7 @@ func (s *Server) startGrpcLoop() {
 		Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead
 	}
 
-	s.grpcServer = grpc.NewServer(
+	grpcOpts := []grpc.ServerOption{
 		grpc.KeepaliveEnforcementPolicy(kaep),
 		grpc.KeepaliveParams(kasp),
 		grpc.MaxRecvMsgSize(Params.ServerMaxRecvSize.GetAsInt()),
@@ -200,8 +199,10 @@ func (s *Server) startGrpcLoop() {
 			return s.serverID.Load()
 		}),
streamingserviceinterceptor.NewStreamingServiceStreamServerInterceptor(), - )), - grpc.StatsHandler(tracer.GetDynamicOtelGrpcServerStatsHandler())) + ))} + + grpcOpts = append(grpcOpts, utils.EnableInternalTLS("DataCoord")) + s.grpcServer = grpc.NewServer(grpcOpts...) indexpb.RegisterIndexCoordServer(s.grpcServer, s) datapb.RegisterDataCoordServer(s.grpcServer, s) // register the streaming coord grpc service. diff --git a/internal/distributed/datanode/client/client.go b/internal/distributed/datanode/client/client.go index 67d5081a19e8c..98b1ad7a0e6ea 100644 --- a/internal/distributed/datanode/client/client.go +++ b/internal/distributed/datanode/client/client.go @@ -25,6 +25,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" "github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/types" @@ -71,7 +72,15 @@ func NewClient(ctx context.Context, addr string, serverID int64) (types.DataNode client.grpcClient.SetNewGrpcClientFunc(client.newGrpcClient) client.grpcClient.SetNodeID(serverID) client.grpcClient.SetSession(sess) - + if config.InternalTLSEnabled.GetAsBool() { + client.grpcClient.EnableEncryption() + cp, err := utils.CreateCertPoolforClient(Params.DataNodeGrpcClientCfg.InternalTLSCaPemPath.GetValue(), "DataNode") + if err != nil { + log.Error("Failed to create cert pool for DataNode client") + return nil, err + } + client.grpcClient.SetInternalTLSCertPool(cp) + } return client, nil } diff --git a/internal/distributed/datanode/client/client_test.go b/internal/distributed/datanode/client/client_test.go index 03e4b64e74e62..a270054d896ff 100644 --- a/internal/distributed/datanode/client/client_test.go +++ b/internal/distributed/datanode/client/client_test.go @@ -19,11 +19,16 @@ package grpcdatanodeclient import ( "context" "testing" + "time" "github.com/cockroachdb/errors" "github.com/stretchr/testify/assert" + testify_mock "github.com/stretchr/testify/mock" "google.golang.org/grpc" + "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" + "github.com/milvus-io/milvus/internal/mocks" "github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/util/mock" "github.com/milvus-io/milvus/pkg/util/paramtable" @@ -125,3 +130,89 @@ func Test_NewClient(t *testing.T) { err = client.Close() assert.NoError(t, err) } + +func Test_InternalTLS(t *testing.T) { + paramtable.Init() + validPath := "../../../../configs/cert1/ca.pem" + ctx := context.Background() + client, err := NewClient(ctx, "test", 1) + assert.NoError(t, err) + assert.NotNil(t, client) + defer client.Close() + + mockDataNode := mocks.NewMockDataNodeClient(t) + mockGrpcClient := mocks.NewMockGrpcClient[datapb.DataNodeClient](t) + + mockGrpcClient.EXPECT().Close().Return(nil) + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockGrpcClient.EXPECT().ReCall(testify_mock.Anything, testify_mock.Anything).RunAndReturn(func(ctx context.Context, f func(datapb.DataNodeClient) (interface{}, error)) (interface{}, error) { + return f(mockDataNode) + }) + + t.Run("NoCertPool", func(t *testing.T) { + var ErrNoCertPool = errors.New("no cert pool") + mockGrpcClient.EXPECT().SetInternalTLSCertPool(testify_mock.Anything).Return().Once() + client.(*Client).grpcClient = mockGrpcClient + client.(*Client).grpcClient.GetNodeID() + 
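+		// Passing nil simulates a client whose CA bundle was never loaded.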
client.(*Client).grpcClient.SetInternalTLSCertPool(nil) + + mockDataNode.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, ErrNoCertPool) + + _, err := client.GetComponentStates(ctx, nil) + assert.Error(t, err) + assert.Equal(t, ErrNoCertPool, err) + }) + + // Sub-test for invalid certificate path + t.Run("InvalidCertPath", func(t *testing.T) { + invalidCAPath := "invalid/path/to/ca.pem" + cp, err := utils.CreateCertPoolforClient(invalidCAPath, "datanode") + assert.NotNil(t, err) + assert.Nil(t, cp) + }) + + // Sub-test for TLS handshake failure + t.Run("TlsHandshakeFailed", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "datanode") + assert.Nil(t, err) + mockDataNode.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockDataNode.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, errors.New("TLS handshake failed")) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + _, err = client.GetComponentStates(ctx, nil) + assert.NotNil(t, err) + assert.EqualError(t, err, "TLS handshake failed") + }) + + t.Run("TlsHandshakeSuccess", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "datanode") + assert.Nil(t, err) + mockDataNode.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockDataNode.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(&milvuspb.ComponentStates{}, nil) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + componentStates, err := client.GetComponentStates(ctx, nil) + assert.Nil(t, err) + assert.NotNil(t, componentStates) + assert.IsType(t, &milvuspb.ComponentStates{}, componentStates) + }) + + t.Run("ContextDeadlineExceeded", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + time.Sleep(20 * time.Millisecond) + + _, err := client.GetComponentStates(ctx, nil) + assert.ErrorIs(t, err, context.DeadlineExceeded) + }) +} diff --git a/internal/distributed/datanode/service.go b/internal/distributed/datanode/service.go index 5e4ae6f0095e9..1b73f8784db9f 100644 --- a/internal/distributed/datanode/service.go +++ b/internal/distributed/datanode/service.go @@ -42,7 +42,6 @@ import ( "github.com/milvus-io/milvus/internal/util/dependency" _ "github.com/milvus-io/milvus/internal/util/grpcclient" "github.com/milvus-io/milvus/pkg/log" - "github.com/milvus-io/milvus/pkg/tracer" "github.com/milvus-io/milvus/pkg/util/etcd" "github.com/milvus-io/milvus/pkg/util/funcutil" "github.com/milvus-io/milvus/pkg/util/interceptor" @@ -128,8 +127,7 @@ func (s *Server) startGrpcLoop() { Time: 60 * time.Second, // Ping the client if it is idle for 60 seconds to ensure the connection is still active Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead } - - s.grpcServer = grpc.NewServer( + grpcOpts := []grpc.ServerOption{ grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp), grpc.MaxRecvMsgSize(Params.ServerMaxRecvSize.GetAsInt()), @@ -153,8 +151,10 @@ func (s *Server) startGrpcLoop() { } return s.serverID.Load() }), - )), - grpc.StatsHandler(tracer.GetDynamicOtelGrpcServerStatsHandler())) + ))} + + grpcOpts = append(grpcOpts, utils.EnableInternalTLS("DataNode")) + 
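+	// EnableInternalTLS contributes the transport-credentials option; when
+	// internal TLS is disabled it resolves to grpc.Creds(nil), i.e. plaintext.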
s.grpcServer = grpc.NewServer(grpcOpts...)
 	datapb.RegisterDataNodeServer(s.grpcServer, s)
 
 	ctx, cancel := context.WithCancel(s.ctx)
diff --git a/internal/distributed/indexnode/client/client.go b/internal/distributed/indexnode/client/client.go
index cb301bd7d61ef..35e53b8e2cf06 100644
--- a/internal/distributed/indexnode/client/client.go
+++ b/internal/distributed/indexnode/client/client.go
@@ -25,6 +25,7 @@ import (
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
+	"github.com/milvus-io/milvus/internal/distributed/utils"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/workerpb"
 	"github.com/milvus-io/milvus/internal/types"
@@ -72,6 +73,15 @@ func NewClient(ctx context.Context, addr string, nodeID int64, encryption bool)
 	if encryption {
 		client.grpcClient.EnableEncryption()
 	}
+	if config.InternalTLSEnabled.GetAsBool() {
+		client.grpcClient.EnableEncryption()
+		cp, err := utils.CreateCertPoolforClient(Params.IndexNodeGrpcClientCfg.InternalTLSCaPemPath.GetValue(), "IndexNode")
+		if err != nil {
+			log.Error("Failed to create cert pool for IndexNode client")
+			return nil, err
+		}
+		client.grpcClient.SetInternalTLSCertPool(cp)
+	}
 	return client, nil
 }
diff --git a/internal/distributed/indexnode/client/client_test.go b/internal/distributed/indexnode/client/client_test.go
index afa41c152e832..042ef716ced4c 100644
--- a/internal/distributed/indexnode/client/client_test.go
+++ b/internal/distributed/indexnode/client/client_test.go
@@ -18,6 +18,7 @@ package grpcindexnodeclient
 
 import (
 	"context"
+	"errors"
 	"math/rand"
 	"os"
 	"strings"
@@ -28,13 +29,16 @@ import (
 	"github.com/stretchr/testify/mock"
 	"go.uber.org/zap"
 
+	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
+	"github.com/milvus-io/milvus/internal/distributed/utils"
 	"github.com/milvus-io/milvus/internal/mocks"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/workerpb"
 	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/pkg/util/etcd"
 	"github.com/milvus-io/milvus/pkg/util/metricsinfo"
 	"github.com/milvus-io/milvus/pkg/util/paramtable"
+	testify_mock "github.com/stretchr/testify/mock"
 )
 
 func TestMain(m *testing.M) {
@@ -177,3 +182,89 @@ func TestIndexNodeClient(t *testing.T) {
 	err = client.Close()
 	assert.NoError(t, err)
 }
+
+func Test_InternalTLS_IndexNode(t *testing.T) {
+	paramtable.Init()
+	validPath := "../../../../configs/cert1/ca.pem"
+	ctx := context.Background()
+	client, err := NewClient(ctx, "test", 1, false)
+	assert.NoError(t, err)
+	assert.NotNil(t, client)
+	defer client.Close()
+
+	mockIndexNode := mocks.NewMockIndexNodeClient(t)
+	mockGrpcClient := mocks.NewMockGrpcClient[workerpb.IndexNodeClient](t)
+
+	mockGrpcClient.EXPECT().Close().Return(nil)
+	mockGrpcClient.EXPECT().GetNodeID().Return(1)
+	mockGrpcClient.EXPECT().ReCall(testify_mock.Anything, testify_mock.Anything).RunAndReturn(func(ctx context.Context, f func(workerpb.IndexNodeClient) (interface{}, error)) (interface{}, error) {
+		return f(mockIndexNode)
+	})
+
+	t.Run("NoCertPool", func(t *testing.T) {
+		var ErrNoCertPool = errors.New("no cert pool")
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(testify_mock.Anything).Return().Once()
+		client.(*Client).grpcClient = mockGrpcClient
+		client.(*Client).grpcClient.GetNodeID()
+		client.(*Client).grpcClient.SetInternalTLSCertPool(nil)
+
+		mockIndexNode.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, ErrNoCertPool)
+
+		_, err := client.GetComponentStates(ctx, nil)
+		assert.Error(t, err)
+		assert.Equal(t, ErrNoCertPool, err)
+	})
+
+	// Sub-test for invalid certificate path
+	t.Run("InvalidCertPath", func(t *testing.T) {
+		invalidCAPath := "invalid/path/to/ca.pem"
+		cp, err := utils.CreateCertPoolforClient(invalidCAPath, "indexnode")
+		assert.NotNil(t, err)
+		assert.Nil(t, cp)
+	})
+
+	// Sub-test for TLS handshake failure
+	t.Run("TlsHandshakeFailed", func(t *testing.T) {
+		cp, err := utils.CreateCertPoolforClient(validPath, "indexnode")
+		assert.Nil(t, err)
+		mockIndexNode.ExpectedCalls = nil
+
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once()
+		mockGrpcClient.EXPECT().GetNodeID().Return(1)
+		mockIndexNode.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, errors.New("TLS handshake failed"))
+
+		client.(*Client).grpcClient.GetNodeID()
+		client.(*Client).grpcClient.SetInternalTLSCertPool(cp)
+
+		_, err = client.GetComponentStates(ctx, nil)
+		assert.NotNil(t, err)
+		assert.EqualError(t, err, "TLS handshake failed")
+	})
+
+	t.Run("TlsHandshakeSuccess", func(t *testing.T) {
+		cp, err := utils.CreateCertPoolforClient(validPath, "indexnode")
+		assert.Nil(t, err)
+		mockIndexNode.ExpectedCalls = nil
+
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once()
+		mockGrpcClient.EXPECT().GetNodeID().Return(1)
+		mockIndexNode.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(&milvuspb.ComponentStates{}, nil)
+
+		client.(*Client).grpcClient.GetNodeID()
+		client.(*Client).grpcClient.SetInternalTLSCertPool(cp)
+
+		componentStates, err := client.GetComponentStates(ctx, nil)
+		assert.Nil(t, err)
+		assert.NotNil(t, componentStates)
+		assert.IsType(t, &milvuspb.ComponentStates{}, componentStates)
+	})
+
+	t.Run("ContextDeadlineExceeded", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
+		defer cancel()
+		time.Sleep(20 * time.Millisecond)
+
+		_, err := client.GetComponentStates(ctx, nil)
+		assert.ErrorIs(t, err, context.DeadlineExceeded)
+	})
+}
diff --git a/internal/distributed/indexnode/service.go b/internal/distributed/indexnode/service.go
index 403343ee907c4..dfd25e83e769d 100644
--- a/internal/distributed/indexnode/service.go
+++ b/internal/distributed/indexnode/service.go
@@ -33,13 +33,12 @@ import (
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
 	"github.com/milvus-io/milvus/internal/distributed/utils"
 	"github.com/milvus-io/milvus/internal/indexnode"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/workerpb"
 	"github.com/milvus-io/milvus/internal/types"
 	"github.com/milvus-io/milvus/internal/util/dependency"
 	_ "github.com/milvus-io/milvus/internal/util/grpcclient"
 	"github.com/milvus-io/milvus/pkg/log"
-	"github.com/milvus-io/milvus/pkg/tracer"
 	"github.com/milvus-io/milvus/pkg/util/etcd"
 	"github.com/milvus-io/milvus/pkg/util/funcutil"
 	"github.com/milvus-io/milvus/pkg/util/interceptor"
@@ -114,7 +114,7 @@ func (s *Server) startGrpcLoop() {
 		Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead
 	}
 
-	s.grpcServer = grpc.NewServer(
+	grpcOpts := []grpc.ServerOption{
 		grpc.KeepaliveEnforcementPolicy(kaep),
 		grpc.KeepaliveParams(kasp),
 		grpc.MaxRecvMsgSize(Params.ServerMaxRecvSize.GetAsInt()),
@@ -138,9 +138,11 @@ func (s
*Server) startGrpcLoop() {
 			}
 			return s.serverID.Load()
 		}),
-	)),
-		grpc.StatsHandler(tracer.GetDynamicOtelGrpcServerStatsHandler()))
-	workerpb.RegisterIndexNodeServer(s.grpcServer, s)
+	))}
+
+	grpcOpts = append(grpcOpts, utils.EnableInternalTLS("IndexNode"))
+	s.grpcServer = grpc.NewServer(grpcOpts...)
+	workerpb.RegisterIndexNodeServer(s.grpcServer, s)
 	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
 	if err := s.grpcServer.Serve(s.listener); err != nil {
 		s.grpcErrChan <- err
diff --git a/internal/distributed/proxy/client/client.go b/internal/distributed/proxy/client/client.go
index 549cc9671930c..2b8844abd9abf 100644
--- a/internal/distributed/proxy/client/client.go
+++ b/internal/distributed/proxy/client/client.go
@@ -25,6 +25,7 @@ import (
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
+	"github.com/milvus-io/milvus/internal/distributed/utils"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/proxypb"
 	"github.com/milvus-io/milvus/internal/types"
@@ -69,6 +70,15 @@ func NewClient(ctx context.Context, addr string, nodeID int64) (types.ProxyClient, error) {
 	client.grpcClient.SetNewGrpcClientFunc(client.newGrpcClient)
 	client.grpcClient.SetNodeID(nodeID)
 	client.grpcClient.SetSession(sess)
+	if config.InternalTLSEnabled.GetAsBool() {
+		client.grpcClient.EnableEncryption()
+		cp, err := utils.CreateCertPoolforClient(Params.ProxyGrpcClientCfg.InternalTLSCaPemPath.GetValue(), "Proxy")
+		if err != nil {
+			log.Error("Failed to create cert pool for Proxy client")
+			return nil, err
+		}
+		client.grpcClient.SetInternalTLSCertPool(cp)
+	}
 	return client, nil
 }
diff --git a/internal/distributed/proxy/client/client_test.go b/internal/distributed/proxy/client/client_test.go
index e43b02869cbf2..ae23a3ffad1b9 100644
--- a/internal/distributed/proxy/client/client_test.go
+++ b/internal/distributed/proxy/client/client_test.go
@@ -18,6 +18,7 @@ package grpcproxyclient
 
 import (
 	"context"
+	"errors"
 	"testing"
 	"time"
 
@@ -25,11 +26,13 @@ import (
 	"github.com/stretchr/testify/mock"
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
+	"github.com/milvus-io/milvus/internal/distributed/utils"
 	"github.com/milvus-io/milvus/internal/mocks"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/proxypb"
 	"github.com/milvus-io/milvus/pkg/util/merr"
 	"github.com/milvus-io/milvus/pkg/util/paramtable"
+	testify_mock "github.com/stretchr/testify/mock"
 )
 
 func Test_NewClient(t *testing.T) {
@@ -49,6 +52,91 @@ func Test_NewClient(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+func Test_InternalTLS(t *testing.T) {
+	paramtable.Init()
+	validPath := "../../../../configs/cert1/ca.pem"
+	ctx := context.Background()
+	client, err := NewClient(ctx, "test", 1)
+	assert.NoError(t, err)
+	assert.NotNil(t, client)
+	defer client.Close()
+
+	mockProxy := mocks.NewMockProxyClient(t)
+	mockGrpcClient := mocks.NewMockGrpcClient[proxypb.ProxyClient](t)
+
+	mockGrpcClient.EXPECT().Close().Return(nil)
+	mockGrpcClient.EXPECT().GetNodeID().Return(1)
+	mockGrpcClient.EXPECT().ReCall(testify_mock.Anything, testify_mock.Anything).RunAndReturn(func(ctx context.Context, f func(proxypb.ProxyClient) (interface{}, error)) (interface{}, error) {
+		return f(mockProxy)
+	})
+
+	t.Run("NoCertPool", func(t *testing.T) {
+		var ErrNoCertPool = errors.New("no cert pool")
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(testify_mock.Anything).Return().Once()
+		client.(*Client).grpcClient = mockGrpcClient
client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(nil) + + mockProxy.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, ErrNoCertPool) + + _, err := client.GetComponentStates(ctx, nil) + assert.Error(t, err) + assert.Equal(t, ErrNoCertPool, err) + }) + + // Sub-test for invalid certificate path + t.Run("InvalidCertPath", func(t *testing.T) { + invalidCAPath := "invalid/path/to/ca.pem" + cp, err := utils.CreateCertPoolforClient(invalidCAPath, "proxy") + assert.NotNil(t, err) + assert.Nil(t, cp) + }) + + // Sub-test for TLS handshake failure + t.Run("TlsHandshakeFailed", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "proxy") + assert.Nil(t, err) + mockProxy.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockProxy.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, errors.New("TLS handshake failed")) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + _, err = client.GetComponentStates(ctx, nil) + assert.NotNil(t, err) + assert.EqualError(t, err, "TLS handshake failed") + }) + + t.Run("TlsHandshakeSuccess", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "proxy") + assert.Nil(t, err) + mockProxy.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockProxy.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(&milvuspb.ComponentStates{}, nil) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + componentStates, err := client.GetComponentStates(ctx, nil) + assert.Nil(t, err) + assert.NotNil(t, componentStates) + assert.IsType(t, &milvuspb.ComponentStates{}, componentStates) + }) + + t.Run("ContextDeadlineExceeded", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + time.Sleep(20 * time.Millisecond) + + _, err := client.GetComponentStates(ctx, nil) + assert.ErrorIs(t, err, context.DeadlineExceeded) + }) +} func Test_GetComponentStates(t *testing.T) { paramtable.Init() diff --git a/internal/distributed/proxy/service.go b/internal/distributed/proxy/service.go index b7f6e768d269f..050e68c3f99c5 100644 --- a/internal/distributed/proxy/service.go +++ b/internal/distributed/proxy/service.go @@ -404,7 +404,7 @@ func (s *Server) startInternalGrpc(errChan chan error) { } opts := tracer.GetInterceptorOpts() - s.grpcInternalServer = grpc.NewServer( + grpcOpts := []grpc.ServerOption{ grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp), grpc.MaxRecvMsgSize(Params.ServerMaxRecvSize.GetAsInt()), @@ -428,7 +428,10 @@ func (s *Server) startInternalGrpc(errChan chan error) { } return s.serverID.Load() }), - ))) + ))} + + grpcOpts = append(grpcOpts, utils.EnableInternalTLS("Proxy")) + s.grpcInternalServer = grpc.NewServer(grpcOpts...) 
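+	// Only this internal listener takes the internal TLS credentials; the
+	// public proxy port keeps its separate tlsMode configuration.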
proxypb.RegisterProxyServer(s.grpcInternalServer, s)
 	grpc_health_v1.RegisterHealthServer(s.grpcInternalServer, s)
 	errChan <- nil
diff --git a/internal/distributed/querycoord/client/client.go b/internal/distributed/querycoord/client/client.go
index 97ebaf3cb68f9..1eb62fe13621b 100644
--- a/internal/distributed/querycoord/client/client.go
+++ b/internal/distributed/querycoord/client/client.go
@@ -25,6 +25,7 @@ import (
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
+	"github.com/milvus-io/milvus/internal/distributed/utils"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/types"
@@ -62,7 +63,15 @@ func NewClient(ctx context.Context) (types.QueryCoordClient, error) {
 	client.grpcClient.SetGetAddrFunc(client.getQueryCoordAddr)
 	client.grpcClient.SetNewGrpcClientFunc(client.newGrpcClient)
 	client.grpcClient.SetSession(sess)
-
+	if config.InternalTLSEnabled.GetAsBool() {
+		client.grpcClient.EnableEncryption()
+		cp, err := utils.CreateCertPoolforClient(Params.QueryCoordGrpcClientCfg.InternalTLSCaPemPath.GetValue(), "QueryCoord")
+		if err != nil {
+			log.Error("Failed to create cert pool for QueryCoord client")
+			return nil, err
+		}
+		client.grpcClient.SetInternalTLSCertPool(cp)
+	}
 	return client, nil
 }
diff --git a/internal/distributed/querycoord/client/client_test.go b/internal/distributed/querycoord/client/client_test.go
index 0b14ed48b2fa0..caa5179066189 100644
--- a/internal/distributed/querycoord/client/client_test.go
+++ b/internal/distributed/querycoord/client/client_test.go
@@ -29,11 +29,15 @@ import (
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
 
+	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
+	"github.com/milvus-io/milvus/internal/distributed/utils"
+	"github.com/milvus-io/milvus/internal/mocks"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/util/mock"
 	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/pkg/util/etcd"
 	"github.com/milvus-io/milvus/pkg/util/paramtable"
+	testify_mock "github.com/stretchr/testify/mock"
 )
 
 func TestMain(m *testing.M) {
@@ -224,3 +228,89 @@ func Test_NewClient(t *testing.T) {
 	err = client.Close()
 	assert.NoError(t, err)
 }
+
+func Test_InternalTLS(t *testing.T) {
+	paramtable.Init()
+	validPath := "../../../../configs/cert1/ca.pem"
+	ctx := context.Background()
+	client, err := NewClient(ctx)
+	assert.NoError(t, err)
+	assert.NotNil(t, client)
+	defer client.Close()
+
+	mockQC := mocks.NewMockQueryCoordClient(t)
+	mockGrpcClient := mocks.NewMockGrpcClient[querypb.QueryCoordClient](t)
+
+	mockGrpcClient.EXPECT().Close().Return(nil)
+	mockGrpcClient.EXPECT().GetNodeID().Return(1)
+	mockGrpcClient.EXPECT().ReCall(testify_mock.Anything, testify_mock.Anything).RunAndReturn(func(ctx context.Context, f func(querypb.QueryCoordClient) (interface{}, error)) (interface{}, error) {
+		return f(mockQC)
+	})
+
+	t.Run("NoCertPool", func(t *testing.T) {
+		var ErrNoCertPool = errors.New("no cert pool")
+		mockGrpcClient.EXPECT().SetInternalTLSCertPool(testify_mock.Anything).Return().Once()
+		client.(*Client).grpcClient = mockGrpcClient
+		client.(*Client).grpcClient.GetNodeID()
+		client.(*Client).grpcClient.SetInternalTLSCertPool(nil)
+
+		mockQC.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, ErrNoCertPool)
+
+		_, err := client.GetComponentStates(ctx, nil)
+		assert.Error(t, err)
+		assert.Equal(t, ErrNoCertPool, err)
+	})
+
// Sub-test for invalid certificate path + t.Run("InvalidCertPath", func(t *testing.T) { + invalidCAPath := "invalid/path/to/ca.pem" + cp, err := utils.CreateCertPoolforClient(invalidCAPath, "querycoord") + assert.NotNil(t, err) + assert.Nil(t, cp) + }) + + // Sub-test for TLS handshake failure + t.Run("TlsHandshakeFailed", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "querycoord") + assert.Nil(t, err) + mockQC.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockQC.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, errors.New("TLS handshake failed")) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + _, err = client.GetComponentStates(ctx, nil) + assert.NotNil(t, err) + assert.EqualError(t, err, "TLS handshake failed") + }) + + t.Run("TlsHandshakeSuccess", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "querycoord") + assert.Nil(t, err) + mockQC.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockQC.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(&milvuspb.ComponentStates{}, nil) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + componentStates, err := client.GetComponentStates(ctx, nil) + assert.Nil(t, err) + assert.NotNil(t, componentStates) + assert.IsType(t, &milvuspb.ComponentStates{}, componentStates) + }) + + t.Run("ContextDeadlineExceeded", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + time.Sleep(20 * time.Millisecond) + + _, err := client.GetComponentStates(ctx, nil) + assert.ErrorIs(t, err, context.DeadlineExceeded) + }) +} diff --git a/internal/distributed/querycoord/service.go b/internal/distributed/querycoord/service.go index 25b903c4edc9c..333ed591e9694 100644 --- a/internal/distributed/querycoord/service.go +++ b/internal/distributed/querycoord/service.go @@ -42,7 +42,6 @@ import ( "github.com/milvus-io/milvus/internal/util/dependency" _ "github.com/milvus-io/milvus/internal/util/grpcclient" "github.com/milvus-io/milvus/pkg/log" - "github.com/milvus-io/milvus/pkg/tracer" "github.com/milvus-io/milvus/pkg/util" "github.com/milvus-io/milvus/pkg/util/etcd" "github.com/milvus-io/milvus/pkg/util/funcutil" @@ -230,7 +229,7 @@ func (s *Server) startGrpcLoop() { ctx, cancel := context.WithCancel(s.loopCtx) defer cancel() - s.grpcServer = grpc.NewServer( + grpcOpts := []grpc.ServerOption{ grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp), grpc.MaxRecvMsgSize(Params.ServerMaxRecvSize.GetAsInt()), @@ -254,9 +253,10 @@ func (s *Server) startGrpcLoop() { } return s.serverID.Load() }), - )), - grpc.StatsHandler(tracer.GetDynamicOtelGrpcServerStatsHandler()), - ) + ))} + + grpcOpts = append(grpcOpts, utils.EnableInternalTLS("QueryCoord")) + s.grpcServer = grpc.NewServer(grpcOpts...) 
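+	// The node-type string must match a case in utils.EnableInternalTLS;
+	// unknown values log an error and fall back to plaintext credentials.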
querypb.RegisterQueryCoordServer(s.grpcServer, s) go funcutil.CheckGrpcReady(ctx, s.grpcErrChan) diff --git a/internal/distributed/querynode/client/client.go b/internal/distributed/querynode/client/client.go index 5d15fad49f6c1..409aabcc26a03 100644 --- a/internal/distributed/querynode/client/client.go +++ b/internal/distributed/querynode/client/client.go @@ -25,6 +25,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/types" @@ -37,6 +38,8 @@ import ( "github.com/milvus-io/milvus/pkg/util/typeutil" ) +var Params *paramtable.ComponentParam = paramtable.Get() + // Client is the grpc client of QueryNode. type Client struct { grpcClient grpcclient.GrpcClient[querypb.QueryNodeClient] @@ -69,7 +72,15 @@ func NewClient(ctx context.Context, addr string, nodeID int64) (types.QueryNodeC client.grpcClient.SetNewGrpcClientFunc(client.newGrpcClient) client.grpcClient.SetNodeID(nodeID) client.grpcClient.SetSession(sess) - + if config.InternalTLSEnabled.GetAsBool() { + client.grpcClient.EnableEncryption() + cp, err := utils.CreateCertPoolforClient(Params.QueryNodeGrpcClientCfg.InternalTLSCaPemPath.GetValue(), "QueryNode") + if err != nil { + log.Error("Failed to create cert pool for QueryNode client") + return nil, err + } + client.grpcClient.SetInternalTLSCertPool(cp) + } return client, nil } diff --git a/internal/distributed/querynode/client/client_test.go b/internal/distributed/querynode/client/client_test.go index fb6857c44f2ad..82464f5bfe660 100644 --- a/internal/distributed/querynode/client/client_test.go +++ b/internal/distributed/querynode/client/client_test.go @@ -19,14 +19,18 @@ package grpcquerynodeclient import ( "context" "testing" + "time" "github.com/cockroachdb/errors" - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" - + "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" + "github.com/milvus-io/milvus/internal/mocks" "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/util/mock" "github.com/milvus-io/milvus/pkg/util/paramtable" + "github.com/stretchr/testify/assert" + testify_mock "github.com/stretchr/testify/mock" + "google.golang.org/grpc" ) func Test_NewClient(t *testing.T) { @@ -158,3 +162,89 @@ func Test_NewClient(t *testing.T) { err = client.Close() assert.NoError(t, err) } + +func Test_InternalTLS(t *testing.T) { + paramtable.Init() + validPath := "../../../../configs/cert1/ca.pem" + ctx := context.Background() + client, err := NewClient(ctx, "test", 1) + assert.NoError(t, err) + assert.NotNil(t, client) + defer client.Close() + + mockQN := mocks.NewMockQueryNodeClient(t) + mockGrpcClient := mocks.NewMockGrpcClient[querypb.QueryNodeClient](t) + + mockGrpcClient.EXPECT().Close().Return(nil) + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockGrpcClient.EXPECT().ReCall(testify_mock.Anything, testify_mock.Anything).RunAndReturn(func(ctx context.Context, f func(querypb.QueryNodeClient) (interface{}, error)) (interface{}, error) { + return f(mockQN) + }) + + t.Run("NoCertPool", func(t *testing.T) { + var ErrNoCertPool = errors.New("no cert pool") + mockGrpcClient.EXPECT().SetInternalTLSCertPool(testify_mock.Anything).Return().Once() + client.(*Client).grpcClient = mockGrpcClient + 
client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(nil) + + mockQN.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, ErrNoCertPool) + + _, err := client.GetComponentStates(ctx, nil) + assert.Error(t, err) + assert.Equal(t, ErrNoCertPool, err) + }) + + // Sub-test for invalid certificate path + t.Run("InvalidCertPath", func(t *testing.T) { + invalidCAPath := "invalid/path/to/ca.pem" + cp, err := utils.CreateCertPoolforClient(invalidCAPath, "querynode") + assert.NotNil(t, err) + assert.Nil(t, cp) + }) + + // Sub-test for TLS handshake failure + t.Run("TlsHandshakeFailed", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "querynode") + assert.Nil(t, err) + mockQN.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockQN.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, errors.New("TLS handshake failed")) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + _, err = client.GetComponentStates(ctx, nil) + assert.NotNil(t, err) + assert.EqualError(t, err, "TLS handshake failed") + }) + + t.Run("TlsHandshakeSuccess", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "querynode") + assert.Nil(t, err) + mockQN.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockQN.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(&milvuspb.ComponentStates{}, nil) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + componentStates, err := client.GetComponentStates(ctx, nil) + assert.Nil(t, err) + assert.NotNil(t, componentStates) + assert.IsType(t, &milvuspb.ComponentStates{}, componentStates) + }) + + t.Run("ContextDeadlineExceeded", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + time.Sleep(20 * time.Millisecond) + + _, err := client.GetComponentStates(ctx, nil) + assert.ErrorIs(t, err, context.DeadlineExceeded) + }) +} diff --git a/internal/distributed/querynode/service.go b/internal/distributed/querynode/service.go index e66884681aa5f..ceafc0d526096 100644 --- a/internal/distributed/querynode/service.go +++ b/internal/distributed/querynode/service.go @@ -39,7 +39,6 @@ import ( "github.com/milvus-io/milvus/internal/util/dependency" _ "github.com/milvus-io/milvus/internal/util/grpcclient" "github.com/milvus-io/milvus/pkg/log" - "github.com/milvus-io/milvus/pkg/tracer" "github.com/milvus-io/milvus/pkg/util/etcd" "github.com/milvus-io/milvus/pkg/util/funcutil" "github.com/milvus-io/milvus/pkg/util/interceptor" @@ -175,8 +174,7 @@ func (s *Server) startGrpcLoop() { Time: 60 * time.Second, // Ping the client if it is idle for 60 seconds to ensure the connection is still active Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead } - - s.grpcServer = grpc.NewServer( + grpcOpts := []grpc.ServerOption{ grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp), grpc.MaxRecvMsgSize(Params.ServerMaxRecvSize.GetAsInt()), @@ -202,9 +200,10 @@ func (s *Server) startGrpcLoop() { } return s.serverID.Load() }), - )), - grpc.StatsHandler(tracer.GetDynamicOtelGrpcServerStatsHandler()), - ) + ))} + + grpcOpts = append(grpcOpts, 
utils.EnableInternalTLS("QueryNode")) + s.grpcServer = grpc.NewServer(grpcOpts...) querypb.RegisterQueryNodeServer(s.grpcServer, s) ctx, cancel := context.WithCancel(s.ctx) diff --git a/internal/distributed/rootcoord/client/client.go b/internal/distributed/rootcoord/client/client.go index cabb8a33610db..7beb10fa40e5c 100644 --- a/internal/distributed/rootcoord/client/client.go +++ b/internal/distributed/rootcoord/client/client.go @@ -27,6 +27,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/proxypb" "github.com/milvus-io/milvus/internal/proto/rootcoordpb" @@ -69,7 +70,15 @@ func NewClient(ctx context.Context) (types.RootCoordClient, error) { client.grpcClient.SetGetAddrFunc(client.getRootCoordAddr) client.grpcClient.SetNewGrpcClientFunc(client.newGrpcClient) client.grpcClient.SetSession(sess) - + if config.InternalTLSEnabled.GetAsBool() { + client.grpcClient.EnableEncryption() + cp, err := utils.CreateCertPoolforClient(Params.RootCoordGrpcClientCfg.InternalTLSCaPemPath.GetValue(), "RootCoord") + if err != nil { + log.Error("Failed to create cert pool for RootCoord client") + return nil, err + } + client.grpcClient.SetInternalTLSCertPool(cp) + } return client, nil } diff --git a/internal/distributed/rootcoord/client/client_test.go b/internal/distributed/rootcoord/client/client_test.go index 42d922dd41794..46d394ec4f355 100644 --- a/internal/distributed/rootcoord/client/client_test.go +++ b/internal/distributed/rootcoord/client/client_test.go @@ -25,15 +25,18 @@ import ( "time" "github.com/cockroachdb/errors" - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - "google.golang.org/grpc" - + "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/distributed/utils" + "github.com/milvus-io/milvus/internal/mocks" "github.com/milvus-io/milvus/internal/proto/rootcoordpb" "github.com/milvus-io/milvus/internal/util/mock" "github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/util/etcd" "github.com/milvus-io/milvus/pkg/util/paramtable" + "github.com/stretchr/testify/assert" + testify_mock "github.com/stretchr/testify/mock" + "go.uber.org/zap" + "google.golang.org/grpc" ) func TestMain(m *testing.M) { @@ -458,3 +461,89 @@ func Test_NewClient(t *testing.T) { err = client.Close() assert.NoError(t, err) } + +func Test_InternalTLS(t *testing.T) { + paramtable.Init() + validPath := "../../../../configs/cert1/ca.pem" + ctx := context.Background() + client, err := NewClient(ctx) + assert.NoError(t, err) + assert.NotNil(t, client) + defer client.Close() + + mockRC := mocks.NewMockRootCoordClient(t) + mockGrpcClient := mocks.NewMockGrpcClient[rootcoordpb.RootCoordClient](t) + + mockGrpcClient.EXPECT().Close().Return(nil) + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockGrpcClient.EXPECT().ReCall(testify_mock.Anything, testify_mock.Anything).RunAndReturn(func(ctx context.Context, f func(rootcoordpb.RootCoordClient) (interface{}, error)) (interface{}, error) { + return f(mockRC) + }) + + t.Run("NoCertPool", func(t *testing.T) { + var ErrNoCertPool = errors.New("no cert pool") + mockGrpcClient.EXPECT().SetInternalTLSCertPool(testify_mock.Anything).Return().Once() + client.(*Client).grpcClient = mockGrpcClient + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(nil) + + 
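+		// The sentinel error from the mocked RPC must surface to the caller unchanged.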
mockRC.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, ErrNoCertPool) + + _, err := client.GetComponentStates(ctx, nil) + assert.Error(t, err) + assert.Equal(t, ErrNoCertPool, err) + }) + + // Sub-test for invalid certificate path + t.Run("InvalidCertPath", func(t *testing.T) { + invalidCAPath := "invalid/path/to/ca.pem" + cp, err := utils.CreateCertPoolforClient(invalidCAPath, "rootcoord") + assert.NotNil(t, err) + assert.Nil(t, cp) + }) + + // Sub-test for TLS handshake failure + t.Run("TlsHandshakeFailed", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "rootcoord") + assert.Nil(t, err) + mockRC.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockRC.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(nil, errors.New("TLS handshake failed")) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + _, err = client.GetComponentStates(ctx, nil) + assert.NotNil(t, err) + assert.EqualError(t, err, "TLS handshake failed") + }) + + t.Run("TlsHandshakeSuccess", func(t *testing.T) { + cp, err := utils.CreateCertPoolforClient(validPath, "rootcoord") + assert.Nil(t, err) + mockRC.ExpectedCalls = nil + + mockGrpcClient.EXPECT().SetInternalTLSCertPool(cp).Return().Once() + mockGrpcClient.EXPECT().GetNodeID().Return(1) + mockRC.EXPECT().GetComponentStates(testify_mock.Anything, testify_mock.Anything).Return(&milvuspb.ComponentStates{}, nil) + + client.(*Client).grpcClient.GetNodeID() + client.(*Client).grpcClient.SetInternalTLSCertPool(cp) + + componentStates, err := client.GetComponentStates(ctx, nil) + assert.Nil(t, err) + assert.NotNil(t, componentStates) + assert.IsType(t, &milvuspb.ComponentStates{}, componentStates) + }) + + t.Run("ContextDeadlineExceeded", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + time.Sleep(20 * time.Millisecond) + + _, err := client.GetComponentStates(ctx, nil) + assert.ErrorIs(t, err, context.DeadlineExceeded) + }) +} diff --git a/internal/distributed/rootcoord/service.go b/internal/distributed/rootcoord/service.go index d49c3ae4a89f6..efc0944946133 100644 --- a/internal/distributed/rootcoord/service.go +++ b/internal/distributed/rootcoord/service.go @@ -42,7 +42,6 @@ import ( "github.com/milvus-io/milvus/internal/util/dependency" _ "github.com/milvus-io/milvus/internal/util/grpcclient" "github.com/milvus-io/milvus/pkg/log" - "github.com/milvus-io/milvus/pkg/tracer" "github.com/milvus-io/milvus/pkg/util" "github.com/milvus-io/milvus/pkg/util/etcd" "github.com/milvus-io/milvus/pkg/util/funcutil" @@ -278,7 +277,7 @@ func (s *Server) startGrpcLoop() { ctx, cancel := context.WithCancel(s.ctx) defer cancel() - s.grpcServer = grpc.NewServer( + grpcOpts := []grpc.ServerOption{ grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp), grpc.MaxRecvMsgSize(Params.ServerMaxRecvSize.GetAsInt()), @@ -302,8 +301,10 @@ func (s *Server) startGrpcLoop() { } return s.serverID.Load() }), - )), - grpc.StatsHandler(tracer.GetDynamicOtelGrpcServerStatsHandler())) + ))} + + grpcOpts = append(grpcOpts, utils.EnableInternalTLS("RootCoord")) + s.grpcServer = grpc.NewServer(grpcOpts...) 
rootcoordpb.RegisterRootCoordServer(s.grpcServer, s)
 
 	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
diff --git a/internal/distributed/utils/util.go b/internal/distributed/utils/util.go
index f2cc161ead0b1..14435ca2f5972 100644
--- a/internal/distributed/utils/util.go
+++ b/internal/distributed/utils/util.go
@@ -1,9 +1,15 @@
 package utils
 
 import (
+	"crypto/x509"
+	"errors"
+	"os"
+	"strings"
 	"time"
 
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
 
 	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/pkg/util/paramtable"
@@ -30,3 +36,66 @@ func GracefulStopGRPCServer(s *grpc.Server) {
 		<-ch
 	}
 }
+
+func getTLSCreds(certFile string, keyFile string, nodeType string) credentials.TransportCredentials {
+	log.Info("TLS Server PEM Path", zap.String("path", certFile))
+	log.Info("TLS Server Key Path", zap.String("path", keyFile))
+	creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
+	if err != nil {
+		log.Warn(nodeType+" can't create creds", zap.Error(err))
+	}
+	return creds
+}
+
+func EnableInternalTLS(NodeType string) grpc.ServerOption {
+	Params := paramtable.Get()
+	var serverCfg *paramtable.GrpcServerConfig
+	switch strings.ToLower(NodeType) {
+	case "datacoord":
+		serverCfg = &Params.DataCoordGrpcServerCfg
+	case "datanode":
+		serverCfg = &Params.DataNodeGrpcServerCfg
+	case "indexnode":
+		serverCfg = &Params.IndexNodeGrpcServerCfg
+	case "proxy":
+		serverCfg = &Params.ProxyGrpcServerCfg
+	case "querycoord":
+		serverCfg = &Params.QueryCoordGrpcServerCfg
+	case "querynode":
+		serverCfg = &Params.QueryNodeGrpcServerCfg
+	case "rootcoord":
+		serverCfg = &Params.RootCoordGrpcServerCfg
+	default:
+		log.Error("Unknown NodeType", zap.String("nodeType", NodeType))
+		return grpc.Creds(nil)
+	}
+	certFile := serverCfg.InternalTLSServerPemPath.GetValue()
+	keyFile := serverCfg.InternalTLSServerKeyPath.GetValue()
+	internaltlsEnabled := serverCfg.InternalTLSEnabled.GetAsBool()
+
+	log.Info("internal TLS Enabled", zap.Bool("value", internaltlsEnabled))
+
+	if internaltlsEnabled {
+		creds := getTLSCreds(certFile, keyFile, NodeType)
+		return grpc.Creds(creds)
+	}
+	return grpc.Creds(nil)
+}
+
+func CreateCertPoolforClient(caFile string, nodeType string) (*x509.CertPool, error) {
+	log.Info("Creating cert pool for client", zap.String("nodeType", nodeType), zap.String("caFile", caFile))
+	certPool := x509.NewCertPool()
+
+	b, err := os.ReadFile(caFile)
+	if err != nil {
+		log.Error("Error reading cert file in client", zap.Error(err))
+		return nil, err
+	}
+
+	if !certPool.AppendCertsFromPEM(b) {
+		// AppendCertsFromPEM reports failure only via its bool return, so
+		// surface an explicit error instead of a nil one.
+		log.Error("credentials: failed to append certificates")
+		return nil, errors.New("failed to append CA certificate to client cert pool")
+	}
+	return certPool, nil
+}
diff --git a/internal/mocks/mock_grpc_client.go b/internal/mocks/mock_grpc_client.go
index e47fa2bf400ff..4bd3c96f7de2f 100644
--- a/internal/mocks/mock_grpc_client.go
+++ b/internal/mocks/mock_grpc_client.go
@@ -12,6 +12,8 @@ import (
 	mock "github.com/stretchr/testify/mock"
 
 	sessionutil "github.com/milvus-io/milvus/internal/util/sessionutil"
+
+	x509 "crypto/x509"
 )
 
 // MockGrpcClient is an autogenerated mock type for the GrpcClient type
@@ -325,6 +327,39 @@ func (_c *MockGrpcClient_SetGetAddrFunc_Call[T]) RunAndReturn(run func(func() (s
 	return _c
 }
 
+// SetInternalTLSCertPool provides a mock function with given fields: cp
+func (_m *MockGrpcClient[T]) SetInternalTLSCertPool(cp *x509.CertPool) {
+	_m.Called(cp)
+}
+
+// MockGrpcClient_SetInternalTLSCertPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetInternalTLSCertPool'
+type MockGrpcClient_SetInternalTLSCertPool_Call[T grpcclient.GrpcComponent] struct {
+	*mock.Call
+}
+
+// SetInternalTLSCertPool is a helper method to define mock.On call
+//   - cp *x509.CertPool
+func (_e *MockGrpcClient_Expecter[T]) SetInternalTLSCertPool(cp interface{}) *MockGrpcClient_SetInternalTLSCertPool_Call[T] {
+	return &MockGrpcClient_SetInternalTLSCertPool_Call[T]{Call: _e.mock.On("SetInternalTLSCertPool", cp)}
+}
+
+func (_c *MockGrpcClient_SetInternalTLSCertPool_Call[T]) Run(run func(cp *x509.CertPool)) *MockGrpcClient_SetInternalTLSCertPool_Call[T] {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(*x509.CertPool))
+	})
+	return _c
+}
+
+func (_c *MockGrpcClient_SetInternalTLSCertPool_Call[T]) Return() *MockGrpcClient_SetInternalTLSCertPool_Call[T] {
+	_c.Call.Return()
+	return _c
+}
+
+func (_c *MockGrpcClient_SetInternalTLSCertPool_Call[T]) RunAndReturn(run func(*x509.CertPool)) *MockGrpcClient_SetInternalTLSCertPool_Call[T] {
+	_c.Call.Return(run)
+	return _c
+}
+
 // SetNewGrpcClientFunc provides a mock function with given fields: _a0
 func (_m *MockGrpcClient[T]) SetNewGrpcClientFunc(_a0 func(*grpc.ClientConn) T) {
 	_m.Called(_a0)
diff --git a/internal/util/grpcclient/client.go b/internal/util/grpcclient/client.go
index dd9e805da5e31..8927b44d207b1 100644
--- a/internal/util/grpcclient/client.go
+++ b/internal/util/grpcclient/client.go
@@ -19,6 +19,7 @@ package grpcclient
 import (
 	"context"
 	"crypto/tls"
+	"crypto/x509"
 	"strings"
 	"sync"
 	"time"
@@ -84,6 +85,7 @@ type GrpcClient[T GrpcComponent] interface {
 	GetRole() string
 	SetGetAddrFunc(func() (string, error))
 	EnableEncryption()
+	SetInternalTLSCertPool(cp *x509.CertPool)
 	SetNewGrpcClientFunc(func(cc *grpc.ClientConn) T)
 	ReCall(ctx context.Context, caller func(client T) (any, error)) (any, error)
 	Call(ctx context.Context, caller func(client T) (any, error)) (any, error)
@@ -101,9 +103,10 @@ type ClientBase[T interface {
 	newGrpcClient func(cc *grpc.ClientConn) T
 
 	// grpcClient T
-	grpcClient    *clientConnWrapper[T]
-	encryption    bool
-	addr          atomic.String
+	grpcClient    *clientConnWrapper[T]
+	encryption    bool
+	cpInternalTLS *x509.CertPool
+	addr          atomic.String
 	// conn *grpc.ClientConn
 	grpcClientMtx sync.RWMutex
 	role          string
@@ -187,6 +190,10 @@ func (c *ClientBase[T]) EnableEncryption() {
 	c.encryption = true
 }
 
+func (c *ClientBase[T]) SetInternalTLSCertPool(cp *x509.CertPool) {
+	c.cpInternalTLS = cp
+}
+
 // SetNewGrpcClientFunc sets newGrpcClient of client
 func (c *ClientBase[T]) SetNewGrpcClientFunc(f func(cc *grpc.ClientConn) T) {
 	c.newGrpcClient = f
@@ -257,11 +264,12 @@ func (c *ClientBase[T]) connect(ctx context.Context) error {
 		compress = Zstd
 	}
 	if c.encryption {
+		log.Debug("dialing with TLS transport credentials")
 		conn, err = grpc.DialContext(
 			dialContext,
 			addr,
 			// #nosec G402
-			grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
+			grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{RootCAs: c.cpInternalTLS})),
 			grpc.WithBlock(),
 			grpc.WithDefaultCallOptions(
 				grpc.MaxCallRecvMsgSize(c.ClientMaxRecvSize),
diff --git a/internal/util/mock/grpcclient.go b/internal/util/mock/grpcclient.go
index b466f097c3759..5ffa9d8bd7a2e 100644
--- a/internal/util/mock/grpcclient.go
+++ b/internal/util/mock/grpcclient.go
@@ -18,6 +18,7 @@ package mock
 
 import (
 	"context"
+	"crypto/x509"
 	"fmt"
 	"sync"
 
@@ -37,6 +38,7 @@ type GRPCClientBase[T any] struct
{
 	newGrpcClient func(cc *grpc.ClientConn) T
 	grpcClient T
+	cpInternalTLS *x509.CertPool
 	conn *grpc.ClientConn
 	grpcClientMtx sync.RWMutex
 	GetGrpcClientErr error
@@ -60,6 +62,10 @@ func (c *GRPCClientBase[T]) SetRole(role string) {
 func (c *GRPCClientBase[T]) EnableEncryption() {
 }
 
+func (c *GRPCClientBase[T]) SetInternalTLSCertPool(cp *x509.CertPool) {
+	c.cpInternalTLS = cp
+}
+
 func (c *GRPCClientBase[T]) SetNewGrpcClientFunc(f func(cc *grpc.ClientConn) T) {
 	c.newGrpcClient = f
 }
diff --git a/pkg/util/paramtable/grpc_param.go b/pkg/util/paramtable/grpc_param.go
index f2afef49007ec..c1e41874398b7 100644
--- a/pkg/util/paramtable/grpc_param.go
+++ b/pkg/util/paramtable/grpc_param.go
@@ -64,15 +64,20 @@ const (
 // /////////////////////////////////////////////////////////////////////////////
 // --- grpc ---
 type grpcConfig struct {
-	Domain        string    `refreshable:"false"`
-	IP            string    `refreshable:"false"`
-	TLSMode       ParamItem `refreshable:"false"`
-	IPItem        ParamItem `refreshable:"false"`
-	Port          ParamItem `refreshable:"false"`
-	InternalPort  ParamItem `refreshable:"false"`
-	ServerPemPath ParamItem `refreshable:"false"`
-	ServerKeyPath ParamItem `refreshable:"false"`
-	CaPemPath     ParamItem `refreshable:"false"`
+	Domain                   string    `refreshable:"false"`
+	IP                       string    `refreshable:"false"`
+	InternalTLSEnabled       ParamItem `refreshable:"false"`
+	Address                  ParamItem `refreshable:"false"`
+	TLSMode                  ParamItem `refreshable:"false"`
+	IPItem                   ParamItem `refreshable:"false"`
+	Port                     ParamItem `refreshable:"false"`
+	InternalPort             ParamItem `refreshable:"false"`
+	ServerPemPath            ParamItem `refreshable:"false"`
+	ServerKeyPath            ParamItem `refreshable:"false"`
+	CaPemPath                ParamItem `refreshable:"false"`
+	InternalTLSServerPemPath ParamItem `refreshable:"false"`
+	InternalTLSServerKeyPath ParamItem `refreshable:"false"`
+	InternalTLSCaPemPath     ParamItem `refreshable:"false"`
 }
 
 func (p *grpcConfig) init(domain string, base *BaseTable) {
@@ -86,6 +91,13 @@ func (p *grpcConfig) init(domain string, base *BaseTable) {
 	p.IPItem.Init(base.mgr)
 	p.IP = funcutil.GetIP(p.IPItem.GetValue())
 
+	p.Address = ParamItem{
+		Key:     p.Domain + ".internalAdd",
+		Version: "2.0.0",
+		Export:  true,
+	}
+	p.Address.Init(base.mgr)
+
 	p.Port = ParamItem{
 		Key:     p.Domain + ".port",
 		Version: "2.0.0",
@@ -130,15 +142,52 @@ func (p *grpcConfig) init(domain string, base *BaseTable) {
 		Export: true,
 	}
 	p.CaPemPath.Init(base.mgr)
+
+	p.InternalTLSEnabled = ParamItem{
+		Key:          "common.security.internaltlsEnabled",
+		Version:      "2.0.0",
+		DefaultValue: "0",
+		Export:       true,
+	}
+	p.InternalTLSEnabled.Init(base.mgr)
+
+	p.InternalTLSServerPemPath = ParamItem{
+		Key:     "internaltls.serverPemPath",
+		Version: "2.0.0",
+		Export:  true,
+	}
+	p.InternalTLSServerPemPath.Init(base.mgr)
+
+	p.InternalTLSServerKeyPath = ParamItem{
+		Key:     "internaltls.serverKeyPath",
+		Version: "2.0.0",
+		Export:  true,
+	}
+	p.InternalTLSServerKeyPath.Init(base.mgr)
+
+	p.InternalTLSCaPemPath = ParamItem{
+		Key:     "internaltls.caPemPath",
+		Version: "2.0.0",
+		Export:  true,
+	}
+	p.InternalTLSCaPemPath.Init(base.mgr)
 }
 
 // GetAddress return grpc address
 func (p *grpcConfig) GetAddress() string {
-	return p.IP + ":" + p.Port.GetValue()
+	if !p.InternalTLSEnabled.GetAsBool() {
+		return p.IP + ":" + p.Port.GetValue()
+	}
+	return p.Address.GetValue() + ":" + p.Port.GetValue()
 }
 
 func (p *grpcConfig) GetInternalAddress() string {
-	return p.IP + ":" + p.InternalPort.GetValue()
+	if !p.InternalTLSEnabled.GetAsBool() {
+		return p.IP + ":" + p.InternalPort.GetValue()
+	}
+	return p.Address.GetValue() + ":" + p.InternalPort.GetValue()
 }
 
 // GrpcServerConfig is configuration for grpc server.
diff --git a/pkg/util/paramtable/grpc_param_test.go b/pkg/util/paramtable/grpc_param_test.go
index d1970bec8a14a..d8d12a521bf1a 100644
--- a/pkg/util/paramtable/grpc_param_test.go
+++ b/pkg/util/paramtable/grpc_param_test.go
@@ -177,4 +177,15 @@ func TestGrpcClientParams(t *testing.T) {
 	assert.Equal(t, clientConfig.ServerPemPath.GetValue(), "/pem")
 	assert.Equal(t, clientConfig.ServerKeyPath.GetValue(), "/key")
 	assert.Equal(t, clientConfig.CaPemPath.GetValue(), "/ca")
+
+	base.Save("common.security.internaltlsEnabled", "true")
+	base.Save("internaltls.serverPemPath", "/pem")
+	base.Save("internaltls.serverKeyPath", "/key")
+	base.Save("internaltls.caPemPath", "/ca")
+	base.Save(clientConfig.Domain+".internalAdd", "/datanode")
+	assert.True(t, clientConfig.InternalTLSEnabled.GetAsBool())
+	assert.Equal(t, clientConfig.InternalTLSServerPemPath.GetValue(), "/pem")
+	assert.Equal(t, clientConfig.InternalTLSServerKeyPath.GetValue(), "/key")
+	assert.Equal(t, clientConfig.InternalTLSCaPemPath.GetValue(), "/ca")
+	assert.Equal(t, clientConfig.Address.GetValue(), "/datanode")
 }