diff --git a/.gitmodules b/.gitmodules
index a89deaaf89..3e8cef976f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,8 +1,8 @@
[submodule "external/morpheus-visualizations"]
path = external/morpheus-visualizations
url = https://github.com/nv-morpheus/morpheus-visualizations.git
- branch = branch-24.10
+ branch = branch-25.02
[submodule "external/utilities"]
path = external/utilities
url = https://github.com/nv-morpheus/utilities.git
- branch = branch-24.10
+ branch = branch-25.02
diff --git a/CMakeLists.txt b/CMakeLists.txt
index bd9580ae12..5e74aaff25 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -99,7 +99,7 @@ morpheus_utils_initialize_cuda_arch(morpheus)
# Note intentionally excluding CUDA from the LANGUAGES list allowing us to set some clang specific settings later when
# we call morpheus_utils_enable_cuda()
project(morpheus
- VERSION 24.10.00
+ VERSION 25.02.00
LANGUAGES C CXX
)
diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh
index 534f26b519..be5909bcf8 100755
--- a/ci/release/update-version.sh
+++ b/ci/release/update-version.sh
@@ -84,7 +84,7 @@ sed_runner "s|branch-${CURRENT_SHORT_TAG}|branch-${NEXT_SHORT_TAG}|g" manifest.y
sed_runner "s/mrc=${CURRENT_SHORT_TAG}/mrc=${NEXT_SHORT_TAG}/g" dependencies.yaml
# Generate the environment files based upon the updated dependencies.yaml
-conda run -n base --live-stream rapids-dependency-file-generator
+rapids-dependency-file-generator
# examples/digital_fingerprinting
sed_runner "s/v${CURRENT_FULL_VERSION}-runtime/v${NEXT_FULL_VERSION}-runtime/g" \
diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml
index b913be445b..6195edb574 100644
--- a/conda/environments/all_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-125_arch-x86_64.yaml
@@ -67,7 +67,7 @@ dependencies:
- libwebp=1.3.2
- libzlib >=1.3.1,<2
- mlflow
-- mrc=24.10
+- mrc=25.02
- myst-parser=0.18.1
- nbsphinx
- networkx=2.8.8
diff --git a/conda/environments/dev_cuda-125_arch-x86_64.yaml b/conda/environments/dev_cuda-125_arch-x86_64.yaml
index f27becb108..0bb8977635 100644
--- a/conda/environments/dev_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/dev_cuda-125_arch-x86_64.yaml
@@ -57,7 +57,7 @@ dependencies:
- libwebp=1.3.2
- libzlib >=1.3.1,<2
- mlflow
-- mrc=24.10
+- mrc=25.02
- myst-parser=0.18.1
- nbsphinx
- networkx=2.8.8
diff --git a/conda/environments/examples_cuda-125_arch-x86_64.yaml b/conda/environments/examples_cuda-125_arch-x86_64.yaml
index 14ba7e9c8c..3844c3b7b7 100644
--- a/conda/environments/examples_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/examples_cuda-125_arch-x86_64.yaml
@@ -30,7 +30,7 @@ dependencies:
- kfp
- libwebp=1.3.2
- mlflow
-- mrc=24.10
+- mrc=25.02
- networkx=2.8.8
- newspaper3k=0.2
- nodejs=18.*
diff --git a/conda/environments/runtime_cuda-125_arch-x86_64.yaml b/conda/environments/runtime_cuda-125_arch-x86_64.yaml
index 2551739061..b3f9824f16 100644
--- a/conda/environments/runtime_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/runtime_cuda-125_arch-x86_64.yaml
@@ -27,7 +27,7 @@ dependencies:
- grpcio-status
- libwebp=1.3.2
- mlflow
-- mrc=24.10
+- mrc=25.02
- networkx=2.8.8
- numpydoc=1.5
- pip
diff --git a/dependencies.yaml b/dependencies.yaml
index 8de432eb24..c237792d2a 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -296,7 +296,7 @@ dependencies:
- libcudf=24.10
- librdkafka>=1.9.2,<1.10.0a0
- libzlib >=1.3.1,<2
- - mrc=24.10
+ - mrc=25.02
- nlohmann_json=3.11
- pybind11-stubgen=0.10.5
- pylibcudf=24.10
@@ -377,7 +377,7 @@ dependencies:
- grpcio-status
# - libwebp=1.3.2 # Required for CVE mitigation: https://nvd.nist.gov/vuln/detail/CVE-2023-4863 ##
- mlflow #>=2.10.0,<3
- - mrc=24.10
+ - mrc=25.02
- networkx=2.8.8
- numpydoc=1.5
- pydantic
diff --git a/docs/source/basics/building_a_pipeline.md b/docs/source/basics/building_a_pipeline.md
index 06985d5ef6..94fabef0ec 100644
--- a/docs/source/basics/building_a_pipeline.md
+++ b/docs/source/basics/building_a_pipeline.md
@@ -207,7 +207,7 @@ This example shows an NLP Pipeline which uses several stages available in Morphe
#### Launching Triton
Run the following to launch Triton and load the `sid-minibert` model:
```bash
-docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx
+docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx
```
#### Launching Kafka
diff --git a/docs/source/cloud_deployment_guide.md b/docs/source/cloud_deployment_guide.md
index fd79c0f05e..fe49ffee30 100644
--- a/docs/source/cloud_deployment_guide.md
+++ b/docs/source/cloud_deployment_guide.md
@@ -103,7 +103,7 @@ The Helm chart (`morpheus-ai-engine`) that offers the auxiliary components requi
Follow the below steps to install Morpheus AI Engine:
```bash
-helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-ai-engine-24.10.tgz --username='$oauthtoken' --password=$API_KEY --untar
+helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-ai-engine-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar
```
```bash
helm install --set ngc.apiKey="$API_KEY" \
@@ -145,7 +145,7 @@ replicaset.apps/zookeeper-87f9f4dd 1 1 1 54s
Run the following command to pull the Morpheus SDK Client (referred to as Helm chart `morpheus-sdk-client`) on to your instance:
```bash
-helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-sdk-client-24.10.tgz --username='$oauthtoken' --password=$API_KEY --untar
+helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-sdk-client-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar
```
#### Morpheus SDK Client in Sleep Mode
@@ -183,7 +183,7 @@ kubectl -n $NAMESPACE exec sdk-cli-helper -- cp -RL /workspace/models /common
The Morpheus MLflow Helm chart offers MLflow server with Triton plugin to deploy, update, and remove models from the Morpheus AI Engine. The MLflow server UI can be accessed using NodePort `30500`. Follow the below steps to install the Morpheus MLflow:
```bash
-helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-mlflow-24.10.tgz --username='$oauthtoken' --password=$API_KEY --untar
+helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-mlflow-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar
```
```bash
helm install --set ngc.apiKey="$API_KEY" \
diff --git a/docs/source/developer_guide/guides/2_real_world_phishing.md b/docs/source/developer_guide/guides/2_real_world_phishing.md
index d4982c7eee..c460af3e02 100644
--- a/docs/source/developer_guide/guides/2_real_world_phishing.md
+++ b/docs/source/developer_guide/guides/2_real_world_phishing.md
@@ -235,7 +235,7 @@ We will launch a Triton Docker container with:
```shell
docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \
- nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \
+ nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \
tritonserver --model-repository=/models/triton-model-repo \
--exit-on-error=false \
--log-info=true \
diff --git a/docs/source/examples.md b/docs/source/examples.md
index 3c7b8bc424..2989284260 100644
--- a/docs/source/examples.md
+++ b/docs/source/examples.md
@@ -40,7 +40,7 @@ Morpheus supports multiple environments, each environment is intended to support
In addition to this many of the examples utilize the Morpheus Triton Models container which can be obtained by running the following command:
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
The following are the supported environments:
diff --git a/docs/source/getting_started.md b/docs/source/getting_started.md
index 02e430ea6e..7e76b38bdc 100644
--- a/docs/source/getting_started.md
+++ b/docs/source/getting_started.md
@@ -41,19 +41,19 @@ More advanced users, or those who are interested in using the latest pre-release
## Using Pre-Built Docker Containers
### Pull the Morpheus Image
1. Go to [https://catalog.ngc.nvidia.com/orgs/nvidia/teams/morpheus/containers/morpheus/tags](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/morpheus/containers/morpheus/tags)
-1. Choose a version.
-1. Download the selected version, for example for `24.10`:
+1. Choose a version.
+1. Download the selected version, for example for `25.02`:
```bash
- docker pull nvcr.io/nvidia/morpheus/morpheus:24.10-runtime
+ docker pull nvcr.io/nvidia/morpheus/morpheus:25.02-runtime
```
1. Optional: Many of the examples require NVIDIA Triton Inference Server to be running with the included models. To download the Morpheus Triton Server Models container, ensure that the version number matches that of the Morpheus container you downloaded in the previous step, then run:
```bash
- docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+ docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
> **Note about Morpheus versions:**
>
-> Morpheus uses Calendar Versioning ([CalVer](https://calver.org/)). For each Morpheus release there will be an image tagged in the form of `YY.MM-runtime`. This tag will always refer to the latest point release for that version. In addition, there will also be at least one point release version tagged in the form of `vYY.MM.00-runtime`. This will be the initial point release for that version (ex., `v24.10.00-runtime`). In the event of a major bug, we may release additional point releases (ex., `v24.10.01-runtime`, `v24.10.02-runtime` etc...), and the `YY.MM-runtime` tag will be updated to reference that point release.
+> Morpheus uses Calendar Versioning ([CalVer](https://calver.org/)). For each Morpheus release there will be an image tagged in the form of `YY.MM-runtime`. This tag will always refer to the latest point release for that version. In addition, there will also be at least one point release version tagged in the form of `vYY.MM.00-runtime`. This will be the initial point release for that version (ex., `v25.02.00-runtime`). In the event of a major bug, we may release additional point releases (ex., `v25.02.01-runtime`, `v25.02.02-runtime` etc...), and the `YY.MM-runtime` tag will be updated to reference that point release.
>
> Users who want to ensure they are running with the latest bug fixes should use a release image tag (`YY.MM-runtime`). Users who need to deploy a specific version into production should use a point release image tag (`vYY.MM.00-runtime`).
@@ -61,7 +61,7 @@ More advanced users, or those who are interested in using the latest pre-release
1. Ensure that [The NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installation) is installed.
1. Start the container downloaded from the previous section:
```bash
-docker run --rm -ti --runtime=nvidia --gpus=all --net=host -v /var/run/docker.sock:/var/run/docker.sock nvcr.io/nvidia/morpheus/morpheus:24.10-runtime bash
+docker run --rm -ti --runtime=nvidia --gpus=all --net=host -v /var/run/docker.sock:/var/run/docker.sock nvcr.io/nvidia/morpheus/morpheus:25.02-runtime bash
```
Note about some of the flags above:
@@ -147,17 +147,17 @@ To run the built "release" container, use the following:
./docker/run_container_release.sh
```
-The `./docker/run_container_release.sh` script accepts the same `DOCKER_IMAGE_NAME` and `DOCKER_IMAGE_TAG` environment variables that the `./docker/build_container_release.sh` script does. For example, to run version `v24.10.00` use the following:
+The `./docker/run_container_release.sh` script accepts the same `DOCKER_IMAGE_NAME` and `DOCKER_IMAGE_TAG` environment variables that the `./docker/build_container_release.sh` script does. For example, to run version `v25.02.00` use the following:
```bash
-DOCKER_IMAGE_TAG="v24.10.00-runtime" ./docker/run_container_release.sh
+DOCKER_IMAGE_TAG="v25.02.00-runtime" ./docker/run_container_release.sh
```
## Acquiring the Morpheus Models Container
Many of the validation tests and example workflows require a Triton server to function. For simplicity, Morpheus provides a pre-built models container, which contains both the Triton and Morpheus models. Users implementing a release version of Morpheus can download the corresponding Triton models container from NGC with the following command:
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
Users working with an unreleased development version of Morpheus can build the Triton models container from the Morpheus repository. To build the Triton models container, run the following command from the root of the Morpheus repository:
@@ -170,7 +170,7 @@ models/docker/build_container.sh
In a new terminal, use the following command to launch a Docker container for Triton loading all of the included pre-trained models:
```bash
docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \
- nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \
+ nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \
tritonserver --model-repository=/models/triton-model-repo \
--exit-on-error=false \
--log-info=true \
@@ -183,7 +183,7 @@ This will launch Triton using the default network ports (8000 for HTTP, 8001 for
Note: The above command is useful for testing out Morpheus, however it does load several models into GPU memory, which at the time of this writing consumes roughly 2GB of GPU memory. Production users should consider only loading the specific models they plan on using with the `--model-control-mode=explicit` and `--load-model` flags. For example, to launch Triton only loading the `abp-nvsmi-xgb` model:
```bash
docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \
- nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \
+ nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \
tritonserver --model-repository=/models/triton-model-repo \
--exit-on-error=false \
--log-info=true \
diff --git a/examples/abp_nvsmi_detection/README.md b/examples/abp_nvsmi_detection/README.md
index 19dc39bebd..244d729420 100644
--- a/examples/abp_nvsmi_detection/README.md
+++ b/examples/abp_nvsmi_detection/README.md
@@ -89,12 +89,12 @@ This example utilizes the Triton Inference Server to perform inference.
Pull the Docker image for Triton:
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
Run the following to launch Triton and load the `abp-nvsmi-xgb` XGBoost model:
```bash
-docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-nvsmi-xgb
+docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-nvsmi-xgb
```
This will launch Triton and only load the `abp-nvsmi-xgb` model. This model has been configured with a max batch size of 32768, and to use dynamic batching for increased performance.
diff --git a/examples/abp_pcap_detection/README.md b/examples/abp_pcap_detection/README.md
index 77beb6675b..6dc63212af 100644
--- a/examples/abp_pcap_detection/README.md
+++ b/examples/abp_pcap_detection/README.md
@@ -30,13 +30,13 @@ To run this example, an instance of Triton Inference Server and a sample dataset
### Triton Inference Server
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
##### Deploy Triton Inference Server
Run the following to launch Triton and load the `abp-pcap-xgb` model:
```bash
-docker run --rm --gpus=all -p 8000:8000 -p 8001:8001 -p 8002:8002 --name tritonserver nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-pcap-xgb
+docker run --rm --gpus=all -p 8000:8000 -p 8001:8001 -p 8002:8002 --name tritonserver nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-pcap-xgb
```
##### Verify Model Deployment
diff --git a/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt b/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt
index 542582577f..97c7cc554e 100644
--- a/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt
+++ b/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt
@@ -25,7 +25,7 @@ mark_as_advanced(MORPHEUS_CACHE_DIR)
list(PREPEND CMAKE_PREFIX_PATH "$ENV{CONDA_PREFIX}")
project(3_simple_cpp_stage
- VERSION 24.10.00
+ VERSION 25.02.00
LANGUAGES C CXX
)
diff --git a/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt b/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt
index 599cc7641a..4d50e40eb2 100644
--- a/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt
+++ b/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt
@@ -26,7 +26,7 @@ list(PREPEND CMAKE_PREFIX_PATH "$ENV{CONDA_PREFIX}")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
project(4_rabbitmq_cpp_stage
- VERSION 24.10.00
+ VERSION 25.02.00
LANGUAGES C CXX
)
diff --git a/examples/digital_fingerprinting/production/docker-compose.yml b/examples/digital_fingerprinting/production/docker-compose.yml
index 232535199a..99281063c8 100644
--- a/examples/digital_fingerprinting/production/docker-compose.yml
+++ b/examples/digital_fingerprinting/production/docker-compose.yml
@@ -76,8 +76,8 @@ services:
resources:
reservations:
devices:
- - driver: nvidia
- capabilities: [gpu]
+ - driver: nvidia
+ capabilities: [ gpu ]
image: dfp_morpheus_jupyter
container_name: jupyter
ports:
@@ -107,8 +107,8 @@ services:
resources:
reservations:
devices:
- - driver: nvidia
- capabilities: [gpu]
+ - driver: nvidia
+ capabilities: [ gpu ]
networks:
- frontend
- backend
diff --git a/examples/doca/vdb_realtime/README.md b/examples/doca/vdb_realtime/README.md
index 64dabdb459..b99a228d14 100644
--- a/examples/doca/vdb_realtime/README.md
+++ b/examples/doca/vdb_realtime/README.md
@@ -49,7 +49,7 @@ To serve the embedding model, we will use Triton:
cd ${MORPHEUS_ROOT}
# Launch Triton
-docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2
+docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2
```
## Populate the Milvus database
diff --git a/examples/llm/vdb_upload/README.md b/examples/llm/vdb_upload/README.md
index 15fcf60dda..2f96589ebe 100644
--- a/examples/llm/vdb_upload/README.md
+++ b/examples/llm/vdb_upload/README.md
@@ -143,12 +143,12 @@ milvus-server --data .tmp/milvusdb
- Pull the Docker image for Triton:
```bash
- docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+ docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
- Run the following to launch Triton and load the `all-MiniLM-L6-v2` model:
```bash
- docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2
+ docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2
```
This will launch Triton and only load the `all-MiniLM-L6-v2` model. Once Triton has loaded the model, the following
@@ -282,7 +282,7 @@ using `sentence-transformers/paraphrase-multilingual-mpnet-base-v2` as an exampl
- Reload the docker container, specifying that we also need to load paraphrase-multilingual-mpnet-base-v2
```bash
docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \
- nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver \
+ nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver \
--model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model \
all-MiniLM-L6-v2 --load-model sentence-transformers/paraphrase-multilingual-mpnet-base-v2
```
diff --git a/examples/log_parsing/README.md b/examples/log_parsing/README.md
index 0c0d22e01b..5d2485a3bc 100644
--- a/examples/log_parsing/README.md
+++ b/examples/log_parsing/README.md
@@ -34,14 +34,14 @@ Pull the Morpheus Triton models Docker image from NGC.
Example:
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
##### Start Triton Inference Server Container
From the Morpheus repo root directory, run the following to launch Triton and load the `log-parsing-onnx` model:
```bash
-docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model log-parsing-onnx
+docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model log-parsing-onnx
```
##### Verify Model Deployment
diff --git a/examples/nlp_si_detection/README.md b/examples/nlp_si_detection/README.md
index 507d64e862..37d4abfa1f 100644
--- a/examples/nlp_si_detection/README.md
+++ b/examples/nlp_si_detection/README.md
@@ -85,7 +85,7 @@ This example utilizes the Triton Inference Server to perform inference. The neur
From the Morpheus repo root directory, run the following to launch Triton and load the `sid-minibert` model:
```bash
-docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx
+docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx
```
This will launch Triton and only load the `sid-minibert-onnx` model. This model has been configured with a max batch size of 32, and to use dynamic batching for increased performance.
diff --git a/examples/ransomware_detection/README.md b/examples/ransomware_detection/README.md
index 9d19b7820a..e1f7197e1e 100644
--- a/examples/ransomware_detection/README.md
+++ b/examples/ransomware_detection/README.md
@@ -35,7 +35,7 @@ Pull Docker image from NGC (https://ngc.nvidia.com/catalog/containers/nvidia:tri
Example:
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
##### Start Triton Inference Server Container
@@ -43,7 +43,7 @@ From the Morpheus repo root directory, run the following to launch Triton and lo
```bash
# Run Triton in explicit mode
docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \
- nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 \
+ nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \
tritonserver --model-repository=/models/triton-model-repo \
--exit-on-error=false \
--model-control-mode=explicit \
diff --git a/examples/root_cause_analysis/README.md b/examples/root_cause_analysis/README.md
index 329df0e449..45d36b8f0f 100644
--- a/examples/root_cause_analysis/README.md
+++ b/examples/root_cause_analysis/README.md
@@ -54,7 +54,7 @@ This example utilizes the Triton Inference Server to perform inference. The bina
From the Morpheus repo root directory, run the following to launch Triton and load the `root-cause-binary-onnx` model:
```bash
-docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model root-cause-binary-onnx
+docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model root-cause-binary-onnx
```
This will launch Triton and only load the model required by our example pipeline. The model has been configured with a max batch size of 32, and to use dynamic batching for increased performance.
diff --git a/examples/sid_visualization/docker-compose.yml b/examples/sid_visualization/docker-compose.yml
index 9f42360019..bbcd3e4534 100644
--- a/examples/sid_visualization/docker-compose.yml
+++ b/examples/sid_visualization/docker-compose.yml
@@ -23,7 +23,7 @@ x-with-gpus: &with_gpus
services:
triton:
- image: nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+ image: nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
<<: *with_gpus
command: "tritonserver --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx --model-repository=/models/triton-model-repo"
environment:
diff --git a/external/morpheus-visualizations b/external/morpheus-visualizations
index f69a1fa8f5..c83e22fc0b 160000
--- a/external/morpheus-visualizations
+++ b/external/morpheus-visualizations
@@ -1 +1 @@
-Subproject commit f69a1fa8f5977b02a70436d92febfd4db1e0ad4d
+Subproject commit c83e22fc0be11a522d51ee79eb64b2d94d55ae2c
diff --git a/external/utilities b/external/utilities
index 87b33dd0b7..7f5904513c 160000
--- a/external/utilities
+++ b/external/utilities
@@ -1 +1 @@
-Subproject commit 87b33dd0b7fd3d7460742bc5ad13d77e0d722c3c
+Subproject commit 7f5904513ca1281670aea8c351dae140892d3dfc
diff --git a/manifest.yaml b/manifest.yaml
index 8646890c13..0eb4e0c616 100644
--- a/manifest.yaml
+++ b/manifest.yaml
@@ -26,6 +26,6 @@ repos:
sub_dir: python/morpheus
git:
host: github
- tag: branch-24.10
+ tag: branch-25.02
upstream: nv-morpheus
repo: morpheus
diff --git a/models/model-cards/abp-model-card.md b/models/model-cards/abp-model-card.md
index 874530ba78..9f3fda12b0 100644
--- a/models/model-cards/abp-model-card.md
+++ b/models/model-cards/abp-model-card.md
@@ -76,7 +76,7 @@ limitations under the License.
## Training Dataset:
**Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/abp-sample-nvsmi-training-data.json
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/abp-sample-nvsmi-training-data.json
**Properties (Quantity, Dataset Descriptions, Sensors):**
* Sample dataset consists of over 1000 `nvidia-smi` outputs
@@ -84,7 +84,7 @@ limitations under the License.
## Evaluation Dataset:
**Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/abp-validation-data.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/abp-validation-data.jsonlines
**Properties (Quantity, Dataset Descriptions, Sensors):**
* Sample dataset consists of over 1000 `nvidia-smi` outputs
@@ -146,7 +146,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
## Model Card ++ Safety & Security Subcard
### Link the location of the repository for the training dataset.
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/abp-sample-nvsmi-training-data.json
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/abp-sample-nvsmi-training-data.json
### Describe the life critical impact (if present).
* None
diff --git a/models/model-cards/dfp-model-card.md b/models/model-cards/dfp-model-card.md
index 88b453d254..c1550d0921 100644
--- a/models/model-cards/dfp-model-card.md
+++ b/models/model-cards/dfp-model-card.md
@@ -65,36 +65,36 @@ The model architecture consists of an Autoencoder, where the reconstruction loss
* Linux
## Model Versions:
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/dfp-models/hammah-role-g-20211017-dill.pkl
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/dfp-models/hammah-user123-20211017-dill.pkl
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/dfp-models/hammah-role-g-20211017-dill.pkl
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/dfp-models/hammah-user123-20211017-dill.pkl
# Training & Evaluation:
## Training Dataset:
**Link:**
-* https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/training-data/cloudtrail
+* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail
**Properties (Quantity, Dataset Descriptions, Sensors):**
The training dataset consists of AWS CloudTrail logs. It contains logs from two entities, providing information about their activities within the AWS environment.
-* [hammah-role-g-training-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-role-g-training-part1.json): 700 records
-* [hammah-role-g-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-role-g-training-part2.json): 1187 records
-* [hammah-user123-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-user123-training-part2.json): 1000 records
-* [hammah-user123-training-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-user123-training-part3.json): 1000 records
-* [hammah-user123-training-part4.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/cloudtrail/hammah-user123-training-part4.json): 387 records
+* [hammah-role-g-training-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-role-g-training-part1.json): 700 records
+* [hammah-role-g-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-role-g-training-part2.json): 1187 records
+* [hammah-user123-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part2.json): 1000 records
+* [hammah-user123-training-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part3.json): 1000 records
+* [hammah-user123-training-part4.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part4.json): 387 records
## Evaluation Dataset:
**Link:**
-* https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/validation-data/cloudtrail
+* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/validation-data/cloudtrail
**Properties (Quantity, Dataset Descriptions, Sensors):**
The evaluation dataset consists of AWS CloudTrail logs. It contains logs from two entities, providing information about their activities within the AWS environment.
-* [hammah-role-g-validation.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-role-g-validation.json): 314 records
-* [hammah-user123-validation-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part1.json): 300 records
-* [hammah-user123-validation-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part2.json): 300 records
-* [hammah-user123-validation-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part3.json): 247 records
+* [hammah-role-g-validation.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-role-g-validation.json): 314 records
+* [hammah-user123-validation-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part1.json): 300 records
+* [hammah-user123-validation-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part2.json): 300 records
+* [hammah-user123-validation-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part3.json): 247 records
## Inference:
**Engine:**
@@ -133,7 +133,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
### Describe how this model works.
* The model works by training on baseline behaviors and subsequently detecting deviations from the established baseline, triggering alerts accordingly.
-* [Training notebook](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/training-tuning-scripts/dfp-models/hammah-20211017.ipynb)
+* [Training notebook](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/training-tuning-scripts/dfp-models/hammah-20211017.ipynb)
### List the technical limitations of the model.
* The model expects CloudTrail logs with specific features that match the training dataset. Data lacking the required features or requiring a different feature set may not be compatible with the model.
@@ -153,7 +153,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
## Model Card ++ Safety & Security Subcard
### Link the location of the repository for the training dataset (if able to share).
-* https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/training-data/cloudtrail
+* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail
### Describe the life critical impact (if present).
* None
@@ -194,7 +194,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
* No
### Are we able to identify and trace source of dataset?
-* Yes ([fully synthetic dataset](https://github.com/nv-morpheus/Morpheus/tree/branch-24.10/models/datasets/training-data/cloudtrail))
+* Yes ([fully synthetic dataset](https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail))
### Does data labeling (annotation, metadata) comply with privacy laws?
* Not applicable (dataset is fully synthetic)
diff --git a/models/model-cards/gnn-fsi-model-card.md b/models/model-cards/gnn-fsi-model-card.md
index 27fc6f73a7..30358d2460 100644
--- a/models/model-cards/gnn-fsi-model-card.md
+++ b/models/model-cards/gnn-fsi-model-card.md
@@ -140,7 +140,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe
* None
### Link the relevant end user license agreement
-* [Apache 2.0](https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/LICENSE)
+* [Apache 2.0](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/LICENSE)
## Model Card ++ Safety & Security Subcard
diff --git a/models/model-cards/phishing-model-card.md b/models/model-cards/phishing-model-card.md
index e5f9e1908a..5315fa8003 100644
--- a/models/model-cards/phishing-model-card.md
+++ b/models/model-cards/phishing-model-card.md
@@ -84,7 +84,7 @@ limitations under the License.
## Evaluation Dataset:
**Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/phishing-email-validation-data.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/phishing-email-validation-data.jsonlines
**Properties (Quantity, Dataset Descriptions, Sensors):**
* Dataset consists of SMS messages
diff --git a/models/model-cards/root-cause-analysis-model-card.md b/models/model-cards/root-cause-analysis-model-card.md
index 1c2f8bd6d9..fb1937fe63 100644
--- a/models/model-cards/root-cause-analysis-model-card.md
+++ b/models/model-cards/root-cause-analysis-model-card.md
@@ -74,7 +74,7 @@ limitations under the License.
## Training Dataset:
**Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/root-cause-training-data.csv
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/root-cause-training-data.csv
**Properties (Quantity, Dataset Descriptions, Sensors):**
* kern.log files from DGX machines
@@ -82,7 +82,7 @@ limitations under the License.
## Evaluation Dataset:
**Link:**
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/validation-data/root-cause-validation-data-input.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/root-cause-validation-data-input.jsonlines
**Properties (Quantity, Dataset Descriptions, Sensors):**
* kern.log files from DGX machines
@@ -141,7 +141,7 @@ limitations under the License.
## Model Card ++ Safety & Security Subcard
### Link the location of the repository for the training dataset.
-* https://github.com/nv-morpheus/Morpheus/blob/branch-24.10/models/datasets/training-data/root-cause-training-data.csv
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/root-cause-training-data.csv
### Describe the life critical impact (if present).
* None
diff --git a/models/triton-model-repo/README.md b/models/triton-model-repo/README.md
index 7b2f1c9ded..c12a0fb828 100644
--- a/models/triton-model-repo/README.md
+++ b/models/triton-model-repo/README.md
@@ -40,7 +40,7 @@ The downside of using symlinks is that the entire Morpheus model repo must be vo
## Models Container
The models in this directory are available in a pre-built container image containing Triton Inference Server, along with the models themselves. The container image is available on NGC and can be pulled using the following command:
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
Those users who are working on training their own models have two options available:
diff --git a/python/morpheus/morpheus/_lib/common/__init__.pyi b/python/morpheus/morpheus/_lib/common/__init__.pyi
index 38f3c5fd66..8ba9ecf837 100644
--- a/python/morpheus/morpheus/_lib/common/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/common/__init__.pyi
@@ -210,4 +210,4 @@ def typeid_to_numpy_str(arg0: TypeId) -> str:
pass
def write_df_to_file(df: object, filename: str, file_type: FileTypes = FileTypes.Auto, **kwargs) -> None:
pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus/morpheus/_lib/messages/__init__.pyi b/python/morpheus/morpheus/_lib/messages/__init__.pyi
index 2b52b3d29b..11fba00aee 100644
--- a/python/morpheus/morpheus/_lib/messages/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/messages/__init__.pyi
@@ -265,4 +265,4 @@ class InferenceMemoryFIL(InferenceMemory, TensorMemory):
def seq_ids(self, arg1: object) -> None:
pass
pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus/morpheus/_lib/modules/__init__.pyi b/python/morpheus/morpheus/_lib/modules/__init__.pyi
index 0ec21dfaad..ed47a38d39 100644
--- a/python/morpheus/morpheus/_lib/modules/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/modules/__init__.pyi
@@ -14,4 +14,4 @@ __all__ = [
]
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp b/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp
index fc07f38c7c..7abd593392 100644
--- a/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp
+++ b/python/morpheus/morpheus/_lib/src/objects/rmm_tensor.cpp
@@ -29,7 +29,7 @@
#include <rmm/cuda_stream_view.hpp>  // for cuda_stream_per_thread
#include
-#include <algorithm>  // for copy, transform
+#include <algorithm>  // for copy, transform
#include <functional>  // for multiplies, plus, minus
#include <iterator>  // for back_insert_iterator, back_inserter
#include
diff --git a/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp b/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp
index 690363d166..d6aa159b6d 100644
--- a/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp
+++ b/python/morpheus/morpheus/_lib/src/utilities/table_util.cpp
@@ -26,8 +26,8 @@
#include <algorithm>  // for find, transform
#include
-#include <iterator>  // for back_insert_iterator, back_inserter
-#include <memory>  // for unique_ptr
+#include <iterator>  // for back_insert_iterator, back_inserter
+#include <memory>  // for unique_ptr
#include <glog/logging.h>  // needed for logging
#include <stdexcept>  // for runtime_error
diff --git a/python/morpheus/morpheus/_lib/stages/__init__.pyi b/python/morpheus/morpheus/_lib/stages/__init__.pyi
index 922c194deb..8d2fe7f911 100644
--- a/python/morpheus/morpheus/_lib/stages/__init__.pyi
+++ b/python/morpheus/morpheus/_lib/stages/__init__.pyi
@@ -85,4 +85,4 @@ class SerializeStage(mrc.core.segment.SegmentObject):
class WriteToFileStage(mrc.core.segment.SegmentObject):
def __init__(self, builder: mrc.core.segment.Builder, name: str, filename: str, mode: str = 'w', file_type: morpheus._lib.common.FileTypes = FileTypes.Auto, include_index_col: bool = True, flush: bool = False) -> None: ...
pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi b/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi
index 842dc8dba0..2f6f52addd 100644
--- a/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi
+++ b/python/morpheus_llm/morpheus_llm/_lib/llm/__init__.pyi
@@ -231,4 +231,4 @@ class LLMTaskHandler():
Task[Optional[list[ControlMessage]]]
"""
pass
-__version__ = '24.10.0'
+__version__ = '25.2.0'
diff --git a/scripts/validation/val-globals.sh b/scripts/validation/val-globals.sh
index 810748fb99..94f3c66982 100755
--- a/scripts/validation/val-globals.sh
+++ b/scripts/validation/val-globals.sh
@@ -26,7 +26,7 @@ export e="\033[0;90m"
export y="\033[0;33m"
export x="\033[0m"
-export TRITON_IMAGE=${TRITON_IMAGE:-"nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10"}
+export TRITON_IMAGE=${TRITON_IMAGE:-"nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02"}
# TRITON_GRPC_PORT is only used when TRITON_URL is undefined
export TRITON_GRPC_PORT=${TRITON_GRPC_PORT:-"8001"}
diff --git a/tests/benchmarks/README.md b/tests/benchmarks/README.md
index 6259c79e65..7056a43a0a 100644
--- a/tests/benchmarks/README.md
+++ b/tests/benchmarks/README.md
@@ -24,12 +24,12 @@ Pull Morpheus Models Docker image from NGC.
Example:
```bash
-docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10
+docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02
```
##### Start Triton Inference Server container
```bash
-docker run --gpus=all --rm -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:24.10 tritonserver --model-repository=/models/triton-model-repo --model-control-mode=explicit --load-model sid-minibert-onnx --load-model abp-nvsmi-xgb --load-model phishing-bert-onnx --load-model all-MiniLM-L6-v2
+docker run --gpus=all --rm -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --model-control-mode=explicit --load-model sid-minibert-onnx --load-model abp-nvsmi-xgb --load-model phishing-bert-onnx --load-model all-MiniLM-L6-v2
```
##### Verify Model Deployments