From 52da2e2c4130abe3a34517e4a6fddacde4548a85 Mon Sep 17 00:00:00 2001
From: David Gardner
Date: Mon, 21 Oct 2024 11:18:06 -0700
Subject: [PATCH 1/6] Creating branch for v25.02

From 0b0a901a7ee6a835251f06456b1aa4464f5ae1c9 Mon Sep 17 00:00:00 2001
From: David Gardner
Date: Mon, 21 Oct 2024 12:17:21 -0700
Subject: [PATCH 2/6] Fix imports for version parser

---
 ci/release/update-version.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh
index 6651dcdb15..ccb42e7c27 100755
--- a/ci/release/update-version.sh
+++ b/ci/release/update-version.sh
@@ -48,8 +48,8 @@ NEXT_PATCH=$(echo ${NEXT_FULL_VERSION} | awk '{split($0, a, "."); print a[3]}')
 NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR}
 
 # Need to distutils-normalize the versions for some use cases
-CURRENT_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; print(packaging.version.Version('${CURRENT_SHORT_TAG}'))")
-NEXT_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; print(packaging.version.Version('${NEXT_SHORT_TAG}'))")
+CURRENT_SHORT_TAG_PEP440=$(python -c "from packaging import version; print(version.Version('${CURRENT_SHORT_TAG}'))")
+NEXT_SHORT_TAG_PEP440=$(python -c "from packaging import version; print(version.Version('${NEXT_SHORT_TAG}'))")
 
 echo "Preparing release $CURRENT_FULL_VERSION (PEP ${CURRENT_SHORT_TAG_PEP440}) => $NEXT_FULL_VERSION (PEP ${NEXT_SHORT_TAG_PEP440})"

From c10f0baa5b5033e00fa1f5f764919c7e510ec9bf Mon Sep 17 00:00:00 2001
From: David Gardner
Date: Mon, 21 Oct 2024 12:32:54 -0700
Subject: [PATCH 3/6] For some reason 'conda run -n base' isn't setting the
 path properly

---
 ci/release/update-version.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh
index ccb42e7c27..7d88cd4c97 100755
--- a/ci/release/update-version.sh
+++ b/ci/release/update-version.sh
@@ -84,7 +84,7 @@ sed_runner "s|branch-${CURRENT_SHORT_TAG}|branch-${NEXT_SHORT_TAG}|g" manifest.y
 sed_runner "s/mrc=${CURRENT_SHORT_TAG}/mrc=${NEXT_SHORT_TAG}/g" dependencies.yaml
 
 # Generate the environment files based upon the updated dependencies.yaml
-conda run -n base --live-stream rapids-dependency-file-generator
+rapids-dependency-file-generator
 
 # examples/digital_fingerprinting
 sed_runner "s/v${CURRENT_FULL_VERSION}-runtime/v${NEXT_FULL_VERSION}-runtime/g" \

From 258acf426756d78e310db2269afe5d9ea580c5c2 Mon Sep 17 00:00:00 2001
From: David Gardner
Date: Mon, 21 Oct 2024 12:33:43 -0700
Subject: [PATCH 4/6] Updating versions for v25.02.00

---
 .gitmodules                                   |  4 +--
 CMakeLists.txt                                |  2 +-
 .../all_cuda-125_arch-x86_64.yaml             |  2 +-
 .../dev_cuda-125_arch-x86_64.yaml             |  2 +-
 .../examples_cuda-125_arch-x86_64.yaml        |  2 +-
 .../runtime_cuda-125_arch-x86_64.yaml         |  2 +-
 dependencies.yaml                             |  4 +--
 docs/source/basics/building_a_pipeline.md     |  2 +-
 docs/source/cloud_deployment_guide.md         |  6 ++--
 .../guides/2_real_world_phishing.md           |  2 +-
 docs/source/examples.md                       |  2 +-
 docs/source/getting_started.md                | 20 ++++++------
 examples/abp_nvsmi_detection/README.md        |  4 +--
 examples/abp_pcap_detection/README.md         |  4 +--
 .../3_simple_cpp_stage/CMakeLists.txt         |  2 +-
 .../4_rabbitmq_cpp_stage/CMakeLists.txt       |  2 +-
 .../production/Dockerfile                     |  2 +-
 .../production/docker-compose.yml             |  4 +--
 examples/doca/vdb_realtime/README.md          |  2 +-
 examples/llm/vdb_upload/README.md             |  6 ++--
 examples/log_parsing/README.md                |  4 +--
 examples/nlp_si_detection/README.md           |  2 +-
 examples/ransomware_detection/README.md       |  4 +--
examples/root_cause_analysis/README.md | 2 +- examples/sid_visualization/docker-compose.yml | 2 +- external/morpheus-visualizations | 2 +- external/utilities | 2 +- manifest.yaml | 2 +- models/model-cards/abp-model-card.md | 6 ++-- models/model-cards/dfp-model-card.md | 32 +++++++++---------- models/model-cards/gnn-fsi-model-card.md | 2 +- models/model-cards/phishing-model-card.md | 2 +- .../root-cause-analysis-model-card.md | 6 ++-- models/triton-model-repo/README.md | 2 +- scripts/validation/val-globals.sh | 2 +- tests/benchmarks/README.md | 4 +-- 36 files changed, 76 insertions(+), 76 deletions(-) diff --git a/.gitmodules b/.gitmodules index a89deaaf89..3e8cef976f 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,8 +1,8 @@ [submodule "external/morpheus-visualizations"] path = external/morpheus-visualizations url = https://github.com/nv-morpheus/morpheus-visualizations.git - branch = branch-24.10 + branch = branch-25.02 [submodule "external/utilities"] path = external/utilities url = https://github.com/nv-morpheus/utilities.git - branch = branch-24.10 + branch = branch-25.02 diff --git a/CMakeLists.txt b/CMakeLists.txt index bd9580ae12..5e74aaff25 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -99,7 +99,7 @@ morpheus_utils_initialize_cuda_arch(morpheus) # Note intentionally excluding CUDA from the LANGUAGES list allowing us to set some clang specific settings later when # we call morpheus_utils_enable_cuda() project(morpheus - VERSION 24.10.00 + VERSION 25.02.00 LANGUAGES C CXX ) diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml index 0281cf6b68..004e202a48 100644 --- a/conda/environments/all_cuda-125_arch-x86_64.yaml +++ b/conda/environments/all_cuda-125_arch-x86_64.yaml @@ -67,7 +67,7 @@ dependencies: - libwebp=1.3.2 - libzlib >=1.3.1,<2 - mlflow -- mrc=24.10 +- mrc=25.02 - myst-parser=0.18.1 - nbsphinx - networkx=2.8.8 diff --git a/conda/environments/dev_cuda-125_arch-x86_64.yaml b/conda/environments/dev_cuda-125_arch-x86_64.yaml index af599fb7de..40b952a1ad 100644 --- a/conda/environments/dev_cuda-125_arch-x86_64.yaml +++ b/conda/environments/dev_cuda-125_arch-x86_64.yaml @@ -57,7 +57,7 @@ dependencies: - libwebp=1.3.2 - libzlib >=1.3.1,<2 - mlflow -- mrc=24.10 +- mrc=25.02 - myst-parser=0.18.1 - nbsphinx - networkx=2.8.8 diff --git a/conda/environments/examples_cuda-125_arch-x86_64.yaml b/conda/environments/examples_cuda-125_arch-x86_64.yaml index e387e2c9bf..7ca238c703 100644 --- a/conda/environments/examples_cuda-125_arch-x86_64.yaml +++ b/conda/environments/examples_cuda-125_arch-x86_64.yaml @@ -30,7 +30,7 @@ dependencies: - kfp - libwebp=1.3.2 - mlflow -- mrc=24.10 +- mrc=25.02 - networkx=2.8.8 - newspaper3k=0.2 - nodejs=18.* diff --git a/conda/environments/runtime_cuda-125_arch-x86_64.yaml b/conda/environments/runtime_cuda-125_arch-x86_64.yaml index 2551739061..b3f9824f16 100644 --- a/conda/environments/runtime_cuda-125_arch-x86_64.yaml +++ b/conda/environments/runtime_cuda-125_arch-x86_64.yaml @@ -27,7 +27,7 @@ dependencies: - grpcio-status - libwebp=1.3.2 - mlflow -- mrc=24.10 +- mrc=25.02 - networkx=2.8.8 - numpydoc=1.5 - pip diff --git a/dependencies.yaml b/dependencies.yaml index 53ed12a9ab..867fe27b78 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -284,7 +284,7 @@ dependencies: - libcudf=24.10 - librdkafka>=1.9.2,<1.10.0a0 - libzlib >=1.3.1,<2 - - mrc=24.10 + - mrc=25.02 - nlohmann_json=3.11 - pybind11-stubgen=0.10.5 - pylibcudf=24.10 @@ -364,7 +364,7 @@ dependencies: - grpcio-status # - 
libwebp=1.3.2 # Required for CVE mitigation: https://nvd.nist.gov/vuln/detail/CVE-2023-4863 ## - mlflow #>=2.10.0,<3 - - mrc=24.10 + - mrc=25.02 - networkx=2.8.8 - numpydoc=1.5 - pydantic diff --git a/docs/source/basics/building_a_pipeline.md b/docs/source/basics/building_a_pipeline.md index 06985d5ef6..94fabef0ec 100644 --- a/docs/source/basics/building_a_pipeline.md +++ b/docs/source/basics/building_a_pipeline.md @@ -207,7 +207,7 @@ This example shows an NLP Pipeline which uses several stages available in Morphe #### Launching Triton Run the following to launch Triton and load the `sid-minibert` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx ``` #### Launching Kafka diff --git a/docs/source/cloud_deployment_guide.md b/docs/source/cloud_deployment_guide.md index 060e85a452..c0979a47ef 100644 --- a/docs/source/cloud_deployment_guide.md +++ b/docs/source/cloud_deployment_guide.md @@ -104,7 +104,7 @@ The Helm chart (`morpheus-ai-engine`) that offers the auxiliary components requi Follow the below steps to install Morpheus AI Engine: ```bash -helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-ai-engine-24.10.tgz --username='$oauthtoken' --password=$API_KEY --untar +helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-ai-engine-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar ``` ```bash helm install --set ngc.apiKey="$API_KEY" \ @@ -146,7 +146,7 @@ replicaset.apps/zookeeper-87f9f4dd 1 1 1 54s Run the following command to pull the Morpheus SDK Client (referred to as Helm chart `morpheus-sdk-client`) on to your instance: ```bash -helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-sdk-client-24.10.tgz --username='$oauthtoken' --password=$API_KEY --untar +helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-sdk-client-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar ``` #### Morpheus SDK Client in Sleep Mode @@ -184,7 +184,7 @@ kubectl -n $NAMESPACE exec sdk-cli-helper -- cp -RL /workspace/models /common The Morpheus MLflow Helm chart offers MLflow server with Triton plugin to deploy, update, and remove models from the Morpheus AI Engine. The MLflow server UI can be accessed using NodePort `30500`. 
Follow the below steps to install the Morpheus MLflow: ```bash -helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-mlflow-24.10.tgz --username='$oauthtoken' --password=$API_KEY --untar +helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-mlflow-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar ``` ```bash helm install --set ngc.apiKey="$API_KEY" \ diff --git a/docs/source/developer_guide/guides/2_real_world_phishing.md b/docs/source/developer_guide/guides/2_real_world_phishing.md index b1ae038f1a..d6d27c9d9e 100644 --- a/docs/source/developer_guide/guides/2_real_world_phishing.md +++ b/docs/source/developer_guide/guides/2_real_world_phishing.md @@ -235,7 +235,7 @@ We will launch a Triton Docker container with: ```shell docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --log-info=true \ diff --git a/docs/source/examples.md b/docs/source/examples.md index 3c7b8bc424..2989284260 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -40,7 +40,7 @@ Morpheus supports multiple environments, each environment is intended to support In addition to this many of the examples utilize the Morpheus Triton Models container which can be obtained by running the following command: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` The following are the supported environments: diff --git a/docs/source/getting_started.md b/docs/source/getting_started.md index b4d2b04cab..53ce4fad2f 100644 --- a/docs/source/getting_started.md +++ b/docs/source/getting_started.md @@ -41,18 +41,18 @@ More advanced users, or those who are interested in using the latest pre-release ### Pull the Morpheus Image 1. Go to [https://catalog.ngc.nvidia.com/orgs/nvidia/teams/morpheus/containers/morpheus/tags](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/morpheus/containers/morpheus/tags) 1. Choose a version -1. Download the selected version, for example for `24.10`: +1. Download the selected version, for example for `25.02`: ```bash - docker pull nvcr.io/nvidia/morpheus/morpheus:24.10-runtime + docker pull nvcr.io/nvidia/morpheus/morpheus:25.02-runtime ``` 1. Optional, many of the examples require NVIDIA Triton Inference Server to be running with the included models. To download the Morpheus Triton Server Models container (ensure that the version number matches that of the Morpheus container you downloaded in the previous step): ```bash - docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 + docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` > **Note about Morpheus versions:** > -> Morpheus uses Calendar Versioning ([CalVer](https://calver.org/)). For each Morpheus release there will be an image tagged in the form of `YY.MM-runtime` this tag will always refer to the latest point release for that version. In addition to this there will also be at least one point release version tagged in the form of `vYY.MM.00-runtime` this will be the initial point release for that version (ex. `v24.10.00-runtime`). In the event of a major bug, we may release additional point releases (ex. `v24.10.01-runtime`, `v24.10.02-runtime` etc...), and the `YY.MM-runtime` tag will be updated to reference that point release. 
+> Morpheus uses Calendar Versioning ([CalVer](https://calver.org/)). For each Morpheus release there will be an image tagged in the form of `YY.MM-runtime` this tag will always refer to the latest point release for that version. In addition to this there will also be at least one point release version tagged in the form of `vYY.MM.00-runtime` this will be the initial point release for that version (ex. `v25.02.00-runtime`). In the event of a major bug, we may release additional point releases (ex. `v25.02.01-runtime`, `v25.02.02-runtime` etc...), and the `YY.MM-runtime` tag will be updated to reference that point release. > > Users who want to ensure they are running with the latest bug fixes should use a release image tag (`YY.MM-runtime`). Users who need to deploy a specific version into production should use a point release image tag (`vYY.MM.00-runtime`). @@ -60,7 +60,7 @@ More advanced users, or those who are interested in using the latest pre-release 1. Ensure that [The NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installation) is installed. 1. Start the container downloaded from the previous section: ```bash -docker run --rm -ti --runtime=nvidia --gpus=all --net=host -v /var/run/docker.sock:/var/run/docker.sock nvcr.io/nvidia/morpheus/morpheus:24.10-runtime bash +docker run --rm -ti --runtime=nvidia --gpus=all --net=host -v /var/run/docker.sock:/var/run/docker.sock nvcr.io/nvidia/morpheus/morpheus:25.02-runtime bash ``` Note about some of the flags above: @@ -140,17 +140,17 @@ To run the built "release" container, use the following: ./docker/run_container_release.sh ``` -The `./docker/run_container_release.sh` script accepts the same `DOCKER_IMAGE_NAME`, and `DOCKER_IMAGE_TAG` environment variables that the `./docker/build_container_release.sh` script does. For example, to run version `v24.10.00` use the following: +The `./docker/run_container_release.sh` script accepts the same `DOCKER_IMAGE_NAME`, and `DOCKER_IMAGE_TAG` environment variables that the `./docker/build_container_release.sh` script does. For example, to run version `v25.02.00` use the following: ```bash -DOCKER_IMAGE_TAG="v24.10.00-runtime" ./docker/run_container_release.sh +DOCKER_IMAGE_TAG="v25.02.00-runtime" ./docker/run_container_release.sh ``` ## Acquiring the Morpheus Models Container Many of the validation tests and example workflows require a Triton server to function. For simplicity Morpheus provides a pre-built models container which contains both Triton and the Morpheus models. Users using a release version of Morpheus can download the corresponding Triton models container from NGC with the following command: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` Users working with an unreleased development version of Morpheus can build the Triton models container from the Morpheus repository. 
To build the Triton models container, from the root of the Morpheus repository run the following command: @@ -163,7 +163,7 @@ models/docker/build_container.sh In a new terminal use the following command to launch a Docker container for Triton loading all of the included pre-trained models: ```bash docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --log-info=true \ @@ -176,7 +176,7 @@ This will launch Triton using the default network ports (8000 for HTTP, 8001 for Note: The above command is useful for testing out Morpheus, however it does load several models into GPU memory, which at time of writing consumes roughly 2GB of GPU memory. Production users should consider only loading the specific models they plan on using with the `--model-control-mode=explicit` and `--load-model` flags. For example to launch Triton only loading the `abp-nvsmi-xgb` model: ```bash docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --log-info=true \ diff --git a/examples/abp_nvsmi_detection/README.md b/examples/abp_nvsmi_detection/README.md index b29ad6bb84..67b81385da 100644 --- a/examples/abp_nvsmi_detection/README.md +++ b/examples/abp_nvsmi_detection/README.md @@ -89,12 +89,12 @@ This example utilizes the Triton Inference Server to perform inference. Pull the Docker image for Triton: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` Run the following to launch Triton and load the `abp-nvsmi-xgb` XGBoost model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-nvsmi-xgb +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-nvsmi-xgb ``` This will launch Triton and only load the `abp-nvsmi-xgb` model. This model has been configured with a max batch size of 32768, and to use dynamic batching for increased performance. 
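Several of the example READMEs updated in this patch follow their Triton launch command with a "Verify Model Deployment" step. As a sketch (not part of this diff), a server started as above can be checked through Triton's standard KServe v2 HTTP endpoints, assuming the default HTTP port 8000 is mapped as in the `docker run` commands shown:

```bash
# Returns HTTP 200 once the Triton server itself is up
curl -s -o /dev/null -w "%{http_code}\n" localhost:8000/v2/health/ready

# Returns HTTP 200 once a specific model (e.g. abp-nvsmi-xgb) is loaded and ready
curl -s -o /dev/null -w "%{http_code}\n" localhost:8000/v2/models/abp-nvsmi-xgb/ready
```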
diff --git a/examples/abp_pcap_detection/README.md b/examples/abp_pcap_detection/README.md index 77beb6675b..6dc63212af 100644 --- a/examples/abp_pcap_detection/README.md +++ b/examples/abp_pcap_detection/README.md @@ -30,13 +30,13 @@ To run this example, an instance of Triton Inference Server and a sample dataset ### Triton Inference Server ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` ##### Deploy Triton Inference Server Run the following to launch Triton and load the `abp-pcap-xgb` model: ```bash -docker run --rm --gpus=all -p 8000:8000 -p 8001:8001 -p 8002:8002 --name tritonserver nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-pcap-xgb +docker run --rm --gpus=all -p 8000:8000 -p 8001:8001 -p 8002:8002 --name tritonserver nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-pcap-xgb ``` ##### Verify Model Deployment diff --git a/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt b/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt index 542582577f..97c7cc554e 100644 --- a/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt +++ b/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt @@ -25,7 +25,7 @@ mark_as_advanced(MORPHEUS_CACHE_DIR) list(PREPEND CMAKE_PREFIX_PATH "$ENV{CONDA_PREFIX}") project(3_simple_cpp_stage - VERSION 24.10.00 + VERSION 25.02.00 LANGUAGES C CXX ) diff --git a/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt b/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt index 599cc7641a..4d50e40eb2 100644 --- a/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt +++ b/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt @@ -26,7 +26,7 @@ list(PREPEND CMAKE_PREFIX_PATH "$ENV{CONDA_PREFIX}") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") project(4_rabbitmq_cpp_stage - VERSION 24.10.00 + VERSION 25.02.00 LANGUAGES C CXX ) diff --git a/examples/digital_fingerprinting/production/Dockerfile b/examples/digital_fingerprinting/production/Dockerfile index d2e330c944..102158fd26 100644 --- a/examples/digital_fingerprinting/production/Dockerfile +++ b/examples/digital_fingerprinting/production/Dockerfile @@ -14,7 +14,7 @@ # limitations under the License. 
ARG MORPHEUS_CONTAINER=nvcr.io/nvidia/morpheus/morpheus -ARG MORPHEUS_CONTAINER_VERSION=v24.10.00-runtime +ARG MORPHEUS_CONTAINER_VERSION=v25.02.00-runtime FROM ${MORPHEUS_CONTAINER}:${MORPHEUS_CONTAINER_VERSION} as base diff --git a/examples/digital_fingerprinting/production/docker-compose.yml b/examples/digital_fingerprinting/production/docker-compose.yml index bc86630e46..19cf16511b 100644 --- a/examples/digital_fingerprinting/production/docker-compose.yml +++ b/examples/digital_fingerprinting/production/docker-compose.yml @@ -74,7 +74,7 @@ services: target: jupyter args: - MORPHEUS_CONTAINER=${MORPHEUS_CONTAINER:-nvcr.io/nvidia/morpheus/morpheus} - - MORPHEUS_CONTAINER_VERSION=${MORPHEUS_CONTAINER_VERSION:-v24.10.00-runtime} + - MORPHEUS_CONTAINER_VERSION=${MORPHEUS_CONTAINER_VERSION:-v25.02.00-runtime} deploy: resources: reservations: @@ -106,7 +106,7 @@ services: target: runtime args: - MORPHEUS_CONTAINER=${MORPHEUS_CONTAINER:-nvcr.io/nvidia/morpheus/morpheus} - - MORPHEUS_CONTAINER_VERSION=${MORPHEUS_CONTAINER_VERSION:-v24.10.00-runtime} + - MORPHEUS_CONTAINER_VERSION=${MORPHEUS_CONTAINER_VERSION:-v25.02.00-runtime} image: dfp_morpheus container_name: morpheus_pipeline deploy: diff --git a/examples/doca/vdb_realtime/README.md b/examples/doca/vdb_realtime/README.md index 64dabdb459..b99a228d14 100644 --- a/examples/doca/vdb_realtime/README.md +++ b/examples/doca/vdb_realtime/README.md @@ -49,7 +49,7 @@ To serve the embedding model, we will use Triton: cd ${MORPHEUS_ROOT} # Launch Triton -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 ``` ## Populate the Milvus database diff --git a/examples/llm/vdb_upload/README.md b/examples/llm/vdb_upload/README.md index b892de2159..0a6e19b5d1 100644 --- a/examples/llm/vdb_upload/README.md +++ b/examples/llm/vdb_upload/README.md @@ -138,12 +138,12 @@ To retrieve datasets from LFS run the following: - Pull the Docker image for Triton: ```bash - docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 + docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` - Run the following to launch Triton and load the `all-MiniLM-L6-v2` model: ```bash - docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 + docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 ``` This will launch Triton and only load the `all-MiniLM-L6-v2` model. 
Once Triton has loaded the model, the following @@ -277,7 +277,7 @@ using `sentence-transformers/paraphrase-multilingual-mpnet-base-v2` as an exampl - Reload the docker container, specifying that we also need to load paraphrase-multilingual-mpnet-base-v2 ```bash docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver \ --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model \ all-MiniLM-L6-v2 --load-model sentence-transformers/paraphrase-multilingual-mpnet-base-v2 ``` diff --git a/examples/log_parsing/README.md b/examples/log_parsing/README.md index 4d798a66cb..d712d619c4 100644 --- a/examples/log_parsing/README.md +++ b/examples/log_parsing/README.md @@ -34,14 +34,14 @@ Pull the Morpheus Triton models Docker image from NGC. Example: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` ##### Start Triton Inference Server Container From the Morpheus repo root directory, run the following to launch Triton and load the `log-parsing-onnx` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model log-parsing-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model log-parsing-onnx ``` ##### Verify Model Deployment diff --git a/examples/nlp_si_detection/README.md b/examples/nlp_si_detection/README.md index 1d24fea105..e13f719087 100644 --- a/examples/nlp_si_detection/README.md +++ b/examples/nlp_si_detection/README.md @@ -85,7 +85,7 @@ This example utilizes the Triton Inference Server to perform inference. The neur From the Morpheus repo root directory, run the following to launch Triton and load the `sid-minibert` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx ``` This will launch Triton and only load the `sid-minibert-onnx` model. This model has been configured with a max batch size of 32, and to use dynamic batching for increased performance. 
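The context lines above repeatedly note that a model is "configured with a max batch size ... and to use dynamic batching for increased performance". That configuration lives in each model's `config.pbtxt` inside the Triton model repository; one way to inspect the active settings on a running server is Triton's model-configuration endpoint (a sketch, assuming the default port mapping used throughout these examples):

```bash
# Dump the effective configuration of a loaded model as JSON; the output
# includes the max_batch_size and dynamic_batching settings mentioned above.
curl -s localhost:8000/v2/models/sid-minibert-onnx/config | python -m json.tool
```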
diff --git a/examples/ransomware_detection/README.md b/examples/ransomware_detection/README.md index 4b15a30b71..a0e16a3311 100644 --- a/examples/ransomware_detection/README.md +++ b/examples/ransomware_detection/README.md @@ -35,7 +35,7 @@ Pull Docker image from NGC (https://ngc.nvidia.com/catalog/containers/nvidia:tri Example: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 ``` ##### Start Triton Inference Server Container @@ -43,7 +43,7 @@ From the Morpheus repo root directory, run the following to launch Triton and lo ```bash # Run Triton in explicit mode docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --model-control-mode=explicit \ diff --git a/examples/root_cause_analysis/README.md b/examples/root_cause_analysis/README.md index 943c00fad2..d7a5f9d94a 100644 --- a/examples/root_cause_analysis/README.md +++ b/examples/root_cause_analysis/README.md @@ -54,7 +54,7 @@ This example utilizes the Triton Inference Server to perform inference. The bina From the Morpheus repo root directory, run the following to launch Triton and load the `root-cause-binary-onnx` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model root-cause-binary-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model root-cause-binary-onnx ``` This will launch Triton and only load the model required by our example pipeline. The model has been configured with a max batch size of 32, and to use dynamic batching for increased performance. 
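Every launch command in these examples passes `--model-control-mode=explicit`, which means Triton loads only the models named by `--load-model` and leaves the rest of `/models/triton-model-repo` unloaded. Models can also be loaded and unloaded after startup through Triton's model repository API; a minimal sketch, assuming the same default port mapping:

```bash
# List every model in the repository along with its current load state
curl -s -X POST localhost:8000/v2/repository/index | python -m json.tool

# Load (or unload) a model at runtime without restarting the server
curl -s -X POST localhost:8000/v2/repository/models/root-cause-binary-onnx/load
curl -s -X POST localhost:8000/v2/repository/models/root-cause-binary-onnx/unload
```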
diff --git a/examples/sid_visualization/docker-compose.yml b/examples/sid_visualization/docker-compose.yml index 9f42360019..bbcd3e4534 100644 --- a/examples/sid_visualization/docker-compose.yml +++ b/examples/sid_visualization/docker-compose.yml @@ -23,7 +23,7 @@ x-with-gpus: &with_gpus services: triton: - image: nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 + image: nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 <<: *with_gpus command: "tritonserver --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx --model-repository=/models/triton-model-repo" environment: diff --git a/external/morpheus-visualizations b/external/morpheus-visualizations index f69a1fa8f5..c83e22fc0b 160000 --- a/external/morpheus-visualizations +++ b/external/morpheus-visualizations @@ -1 +1 @@ -Subproject commit f69a1fa8f5977b02a70436d92febfd4db1e0ad4d +Subproject commit c83e22fc0be11a522d51ee79eb64b2d94d55ae2c diff --git a/external/utilities b/external/utilities index 85f8f7af2e..3ae1a80875 160000 --- a/external/utilities +++ b/external/utilities @@ -1 +1 @@ -Subproject commit 85f8f7af2e8d9bc7bde978cd40c40297b1116957 +Subproject commit 3ae1a808759c8d57ee74d5c7038e0fbdb437de38 diff --git a/manifest.yaml b/manifest.yaml index 8646890c13..0eb4e0c616 100644 --- a/manifest.yaml +++ b/manifest.yaml @@ -26,6 +26,6 @@ repos: sub_dir: python/morpheus git: host: github - tag: branch-24.10 + tag: branch-25.02 upstream: nv-morpheus repo: morpheus diff --git a/models/model-cards/abp-model-card.md b/models/model-cards/abp-model-card.md index 874530ba78..9f3fda12b0 100644 --- a/models/model-cards/abp-model-card.md +++ b/models/model-cards/abp-model-card.md @@ -76,7 +76,7 @@ limitations under the License. ## Training Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/abp-sample-nvsmi-training-data.json
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/abp-sample-nvsmi-training-data.json
 **Properties (Quantity, Dataset Descriptions, Sensors):**
 * Sample dataset consists of over 1000 `nvidia-smi` outputs
@@ -84,7 +84,7 @@ limitations under the License.
 ## Evaluation Dataset:
 **Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/abp-validation-data.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/abp-validation-data.jsonlines
 **Properties (Quantity, Dataset Descriptions, Sensors):**
 * Sample dataset consists of over 1000 `nvidia-smi` outputs
@@ -146,7 +146,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
 ## Model Card ++ Safety & Security Subcard
 
 ### Link the location of the repository for the training dataset.
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/abp-sample-nvsmi-training-data.json
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/abp-sample-nvsmi-training-data.json
 
 ### Describe the life critical impact (if present).
 * None
diff --git a/models/model-cards/dfp-model-card.md b/models/model-cards/dfp-model-card.md
index 88b453d254..c1550d0921 100644
--- a/models/model-cards/dfp-model-card.md
+++ b/models/model-cards/dfp-model-card.md
@@ -65,36 +65,36 @@ The model architecture consists of an Autoencoder, where the reconstruction loss
 * Linux
 ## Model Versions:
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/dfp-models/hammah-role-g-20211017-dill.pkl
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/dfp-models/hammah-user123-20211017-dill.pkl
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/dfp-models/hammah-role-g-20211017-dill.pkl
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/dfp-models/hammah-user123-20211017-dill.pkl
 
 # Training & Evaluation:
 
 ## Training Dataset:
 
 **Link:**
-* https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/training-data/cloudtrail
+* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail
 
 **Properties (Quantity, Dataset Descriptions, Sensors):**
 The training dataset consists of AWS CloudTrail logs. It contains logs from two entities, providing information about their activities within the AWS environment.
-* [hammah-role-g-training-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-role-g-training-part1.json): 700 records
-* [hammah-role-g-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-role-g-training-part2.json): 1187 records
-* [hammah-user123-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-user123-training-part2.json): 1000 records
-* [hammah-user123-training-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-user123-training-part3.json): 1000 records
-* [hammah-user123-training-part4.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-user123-training-part4.json): 387 records
+* [hammah-role-g-training-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-role-g-training-part1.json): 700 records
+* [hammah-role-g-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-role-g-training-part2.json): 1187 records
+* [hammah-user123-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part2.json): 1000 records
+* [hammah-user123-training-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part3.json): 1000 records
+* [hammah-user123-training-part4.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part4.json): 387 records
 ## Evaluation Dataset:
 **Link:**
-* https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/validation-data/cloudtrail
+* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/validation-data/cloudtrail
 **Properties (Quantity, Dataset Descriptions, Sensors):**
 The evaluation dataset consists of AWS CloudTrail logs. It contains logs from two entities, providing information about their activities within the AWS environment.
-* [hammah-role-g-validation.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-role-g-validation.json): 314 records
-* [hammah-user123-validation-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part1.json): 300 records
-* [hammah-user123-validation-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part2.json): 300 records
-* [hammah-user123-validation-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part3.json): 247 records
+* [hammah-role-g-validation.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-role-g-validation.json): 314 records
+* [hammah-user123-validation-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part1.json): 300 records
+* [hammah-user123-validation-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part2.json): 300 records
+* [hammah-user123-validation-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part3.json): 247 records
 
 ## Inference:
 **Engine:**
@@ -133,7 +133,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
 ### Describe how this model works.
 * The model works by training on baseline behaviors and subsequently detecting deviations from the established baseline, triggering alerts accordingly.
-* [Training notebook](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/training-tuning-scripts/dfp-models/hammah-20211017.ipynb)
+* [Training notebook](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/training-tuning-scripts/dfp-models/hammah-20211017.ipynb)
 
 ### List the technical limitations of the model.
 * The model expects CloudTrail logs with specific features that match the training dataset. Data lacking the required features or requiring a different feature set may not be compatible with the model.
@@ -153,7 +153,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
 ## Model Card ++ Safety & Security Subcard
 
 ### Link the location of the repository for the training dataset (if able to share).
-* https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/training-data/cloudtrail
+* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail
 
 ### Describe the life critical impact (if present).
 * None
@@ -194,7 +194,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
 * No
 
 ### Are we able to identify and trace source of dataset?
-* Yes ([fully synthetic dataset](https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/training-data/cloudtrail))
+* Yes ([fully synthetic dataset](https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail))
 
 ### Does data labeling (annotation, metadata) comply with privacy laws?
 * Not applicable (dataset is fully synthetic)
diff --git a/models/model-cards/gnn-fsi-model-card.md b/models/model-cards/gnn-fsi-model-card.md
index 27fc6f73a7..30358d2460 100644
--- a/models/model-cards/gnn-fsi-model-card.md
+++ b/models/model-cards/gnn-fsi-model-card.md
@@ -140,7 +140,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
 * None
 
 ### Link the relevant end user license agreement
-* [Apache 2.0](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/LICENSE)
+* [Apache 2.0](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/LICENSE)
 
 ## Model Card ++ Safety & Security Subcard
diff --git a/models/model-cards/phishing-model-card.md b/models/model-cards/phishing-model-card.md
index e5f9e1908a..5315fa8003 100644
--- a/models/model-cards/phishing-model-card.md
+++ b/models/model-cards/phishing-model-card.md
@@ -84,7 +84,7 @@ limitations under the License.
 ## Evaluation Dataset:
 **Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/phishing-email-validation-data.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/phishing-email-validation-data.jsonlines
 **Properties (Quantity, Dataset Descriptions, Sensors):**
 * Dataset consists of SMS messages
diff --git a/models/model-cards/root-cause-analysis-model-card.md b/models/model-cards/root-cause-analysis-model-card.md
index 1c2f8bd6d9..fb1937fe63 100644
--- a/models/model-cards/root-cause-analysis-model-card.md
+++ b/models/model-cards/root-cause-analysis-model-card.md
@@ -74,7 +74,7 @@ limitations under the License.
 ## Training Dataset:
 **Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/root-cause-training-data.csv
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/root-cause-training-data.csv
 **Properties (Quantity, Dataset Descriptions, Sensors):**
 * kern.log files from DGX machines
@@ -82,7 +82,7 @@ limitations under the License.
 ## Evaluation Dataset:
 **Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/root-cause-validation-data-input.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/root-cause-validation-data-input.jsonlines
 **Properties (Quantity, Dataset Descriptions, Sensors):**
 * kern.log files from DGX machines
@@ -141,7 +141,7 @@ limitations under the License.
 ## Model Card ++ Safety & Security Subcard
 
 ### Link the location of the repository for the training dataset.
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/root-cause-training-data.csv
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/root-cause-training-data.csv
 
 ### Describe the life critical impact (if present).
 * None
diff --git a/models/triton-model-repo/README.md b/models/triton-model-repo/README.md
index a173c2078a..790bef475e 100644
--- a/models/triton-model-repo/README.md
+++ b/models/triton-model-repo/README.md
@@ -40,7 +40,7 @@ The downside of using symlinks is that the entire Morpheus model repo must be vo
 ## Models Container
 The models in this directory are available in a pre-built container image containing Triton Inference Server, along with the models themselves. The container image is available on NGC and can be pulled using the following command:
 ```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
 ```
 Those users who are working on training their own models have two options available:
diff --git a/scripts/validation/val-globals.sh b/scripts/validation/val-globals.sh
index 810748fb99..94f3c66982 100755
--- a/scripts/validation/val-globals.sh
+++ b/scripts/validation/val-globals.sh
@@ -26,7 +26,7 @@ export e="\033[0;90m"
 export y="\033[0;33m"
 export x="\033[0m"
 
-export TRITON_IMAGE=${TRITON_IMAGE:-"nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10"}
+export TRITON_IMAGE=${TRITON_IMAGE:-"nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02"}
 
 # TRITON_GRPC_PORT is only used when TRITON_URL is undefined
 export TRITON_GRPC_PORT=${TRITON_GRPC_PORT:-"8001"}
diff --git a/tests/benchmarks/README.md b/tests/benchmarks/README.md
index 148dbb3d44..e908113ef0 100644
--- a/tests/benchmarks/README.md
+++ b/tests/benchmarks/README.md
@@ -24,12 +24,12 @@ Pull Morpheus Models Docker image from NGC.
 Example:
 ```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
 ```
 
 ##### Start Triton Inference Server container
 ```bash
-docker run --gpus=all --rm -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --model-control-mode=explicit --load-model sid-minibert-onnx --load-model abp-nvsmi-xgb --load-model phishing-bert-onnx --load-model all-MiniLM-L6-v2
+docker run --gpus=all --rm -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --model-control-mode=explicit --load-model sid-minibert-onnx --load-model abp-nvsmi-xgb --load-model phishing-bert-onnx --load-model all-MiniLM-L6-v2
 ```
 
 ##### Verify Model Deployments

From 8f305ff21afe297a9af807d390080c5b501428ad Mon Sep 17 00:00:00 2001
From: David Gardner
Date: Tue, 5 Nov 2024 10:02:57 -0800
Subject: [PATCH 5/6] Formatting changes

---
 python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp   | 2 +-
 python/morpheus/morpheus/_lib/src/utilities/table_util.cpp | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp b/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp
index fc07f38c7c..7abd593392 100644
--- a/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp
+++ b/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp
@@ -29,7 +29,7 @@
 #include // for cuda_stream_per_thread
 #include
-#include // for copy, transform
+#include // for copy, transform
 #include // for multiplies, plus, minus
 #include // for back_insert_iterator, back_inserter
 #include
diff --git a/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp b/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp
index 690363d166..d6aa159b6d 100644
--- a/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp
+++ b/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp
@@ -26,8 +26,8 @@
 #include // for find, transform
 #include
-#include // for back_insert_iterator, back_inserter
-#include // for unique_ptr
+#include // for back_insert_iterator, back_inserter
+#include // for unique_ptr
 #include // needed for logging
 #include // for runtime_error

From 19ebd06e9ecf1d5e8720df5beb118411a78932f3 Mon Sep 17 00:00:00 2001
From: David Gardner
Date: Tue, 5 Nov 2024 10:03:12 -0800
Subject: [PATCH 6/6] Updating stubs

---
 python/morpheus/morpheus/_lib/common/__init__.pyi      | 2 +-
 python/morpheus/morpheus/_lib/messages/__init__.pyi    | 2 +-
 python/morpheus/morpheus/_lib/modules/__init__.pyi     | 2 +-
 python/morpheus/morpheus/_lib/stages/__init__.pyi      | 2 +-
 python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/python/morpheus/morpheus/_lib/common/__init__.pyi b/python/morpheus/morpheus/_lib/common/__init__.pyi
index 38f3c5fd66..8ba9ecf837 100644
--- a/python/morpheus/morpheus/_lib/common/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/common/__init__.pyi
@@ -210,4 +210,4 @@ def typeid_to_numpy_str(arg0: TypeId) -> str:
     pass
 def write_df_to_file(df: object, filename: str, file_type: FileTypes = FileTypes.Auto, **kwargs) -> None:
     pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus/morpheus/_lib/messages/__init__.pyi b/python/morpheus/morpheus/_lib/messages/__init__.pyi
index 2b52b3d29b..11fba00aee 100644
--- a/python/morpheus/morpheus/_lib/messages/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/messages/__init__.pyi
@@ -265,4 +265,4 @@ class InferenceMemoryFIL(InferenceMemory, TensorMemory):
     def seq_ids(self, arg1: object) -> None:
         pass
     pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus/morpheus/_lib/modules/__init__.pyi b/python/morpheus/morpheus/_lib/modules/__init__.pyi
index 0ec21dfaad..ed47a38d39 100644
--- a/python/morpheus/morpheus/_lib/modules/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/modules/__init__.pyi
@@ -14,4 +14,4 @@ __all__ = [
 ]
 
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus/morpheus/_lib/stages/__init__.pyi b/python/morpheus/morpheus/_lib/stages/__init__.pyi
index 922c194deb..8d2fe7f911 100644
--- a/python/morpheus/morpheus/_lib/stages/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/stages/__init__.pyi
@@ -85,4 +85,4 @@ class SerializeStage(mrc.core.segment.SegmentObject):
 class WriteToFileStage(mrc.core.segment.SegmentObject):
     def __init__(self, builder: mrc.core.segment.Builder, name: str, filename: str, mode: str = 'w', file_type: morpheus._lib.common.FileTypes = FileTypes.Auto, include_index_col: bool = True, flush: bool = False) -> None: ...
     pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi b/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi
index 842dc8dba0..2f6f52addd 100644
--- a/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi
+++ b/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi
@@ -231,4 +231,4 @@ class LLMTaskHandler():
         Task[Optional[list[ControlMessage]]]
         """
     pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
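A note on the version strings in patch 6: the stubs record `__version__ = '25.2.0'` rather than `25.02.00` because Python package versions are PEP 440-normalized, which drops leading zeros from release segments. This is the same normalization that the `packaging` call introduced in patch 2 computes for `NEXT_SHORT_TAG_PEP440`; a minimal sketch, mirroring the invocation style used in `ci/release/update-version.sh`:

```bash
# Requires the standalone "packaging" distribution to be installed.
python -c "from packaging import version; print(version.Version('25.02'))"     # -> 25.2   (the normalized short tag)
python -c "from packaging import version; print(version.Version('25.02.00'))"  # -> 25.2.0 (matches __version__ in the stubs)
```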