diff --git a/docs/data.json b/docs/data.json
index 9e355dc04..1e234db75 100644
--- a/docs/data.json
+++ b/docs/data.json
@@ -399,6 +399,46 @@
       "timestamp_utc": "2023-06-29T07:26:25.583573",
       "transformers4rec": "23.6.0",
       "triton": "2.33.0"
+    },
+    "23.08": {
+      "base_container": "Triton version 23.06",
+      "compressedSize": "5.26 GB",
+      "cublas": "12.1.3.1",
+      "cuda": "12.1.1.009",
+      "cudf": "23.04.00",
+      "cudnn": "8.9.2.26",
+      "cufft": "11.0.2.54",
+      "curand": "10.3.2.106",
+      "cusolver": "11.4.5.107",
+      "cusparse": "12.1.0.106",
+      "cutensor": "1.7.0.1",
+      "dgx_system": "* DGX-1\n* DGX-2\n* DGX A100\n* DGX Station",
+      "distributed_embeddings": "Not applicable",
+      "gpu_model": "* `NVIDIA Ampere GPU Architecture `_\n* `Turing `_\n* `Volta `_\n* `Pascal `_",
+      "hugectr": "23.8.0",
+      "hugectr2onnx": "Not applicable",
+      "merlin.core": "23.8.0",
+      "merlin.dataloader": "23.8.0",
+      "merlin.models": "23.8.0",
+      "merlin.systems": "23.8.0",
+      "nvidia_driver": "NVIDIA Driver version 465.19.01\nor later is required. However,\nif you're running on Data Center\nGPUs (formerly Tesla) such as T4,\nyou can use any of the following\nNVIDIA Driver versions:\n\n* 418.40 (or later R418)\n* 440.33 (or later R440)\n* 450.51 (or later R450)\n* 460.27 (or later R460)\n\n**Note**: The CUDA Driver\nCompatibility Package does not\nsupport all drivers.",
+      "nvidia_pytorch": "Not applicable",
+      "nvidia_tensorflow": "Not applicable",
+      "nvtabular": "23.8.0",
+      "openmpi": "4.1.5rc2",
+      "os": "Ubuntu 22.04.2 LTS",
+      "python_major": "3",
+      "pytorch": "Not applicable",
+      "release": "23.08",
+      "rmm": "23.04.00",
+      "size": "525.1 GB",
+      "sm": "70, 75, 80, 90",
+      "sparse_operation_kit": "Not applicable",
+      "tensorrt": "8.6.1.6+cuda12.0.1.011",
+      "tf": "Not applicable",
+      "timestamp_utc": "2023-08-31T07:29:13.848524",
+      "transformers4rec": "23.8.0",
+      "triton": "2.35.0"
     }
   },
   "nvcr.io/nvidia/merlin/merlin-inference": {
@@ -1115,6 +1155,46 @@
       "timestamp_utc": "2023-06-29T07:25:52.907749",
       "transformers4rec": "23.6.0",
       "triton": "2.33.0"
+    },
+    "23.08": {
+      "base_container": "Triton version 23.06",
+      "compressedSize": "6.77 GB",
+      "cublas": "12.1.3.1",
+      "cuda": "12.1.1.009",
+      "cudf": "23.04.00",
+      "cudnn": "8.9.2.26",
+      "cufft": "11.0.2.54",
+      "curand": "10.3.2.106",
+      "cusolver": "11.4.5.107",
+      "cusparse": "12.1.0.106",
+      "cutensor": "1.7.0.1",
+      "dgx_system": "* DGX-1\n* DGX-2\n* DGX A100\n* DGX Station",
+      "distributed_embeddings": "Not applicable",
+      "gpu_model": "* `NVIDIA Ampere GPU Architecture `_\n* `Turing `_\n* `Volta `_\n* `Pascal `_",
+      "hugectr": "Not applicable",
+      "hugectr2onnx": "Not applicable",
+      "merlin.core": "23.8.0",
+      "merlin.dataloader": "23.8.0",
+      "merlin.models": "23.8.0",
+      "merlin.systems": "23.8.0",
+      "nvidia_driver": "NVIDIA Driver version 465.19.01\nor later is required. However,\nif you're running on Data Center\nGPUs (formerly Tesla) such as T4,\nyou can use any of the following\nNVIDIA Driver versions:\n\n* 418.40 (or later R418)\n* 440.33 (or later R440)\n* 450.51 (or later R450)\n* 460.27 (or later R460)\n\n**Note**: The CUDA Driver\nCompatibility Package does not\nsupport all drivers.",
+      "nvidia_pytorch": "Not applicable",
+      "nvidia_tensorflow": "Not applicable",
+      "nvtabular": "23.8.0",
+      "openmpi": "4.1.5rc2",
+      "os": "Ubuntu 22.04.2 LTS",
+      "python_major": "3",
+      "pytorch": "2.0.1",
+      "release": "23.08",
+      "rmm": "23.04.00",
+      "size": "528.11 GB",
+      "sm": "Not applicable",
+      "sparse_operation_kit": "Not applicable",
+      "tensorrt": "8.6.1.6+cuda12.0.1.011",
+      "tf": "Not applicable",
+      "timestamp_utc": "2023-08-31T07:28:34.716445",
+      "transformers4rec": "23.8.0",
+      "triton": "2.35.0"
     }
   },
   "nvcr.io/nvidia/merlin/merlin-pytorch-inference": {
@@ -1950,6 +2030,46 @@
       "timestamp_utc": "2023-06-29T07:25:15.869683",
       "transformers4rec": "23.6.0",
       "triton": "2.33.0"
+    },
+    "23.08": {
+      "base_container": "Triton version 23.06",
+      "compressedSize": "6.54 GB",
+      "cublas": "12.1.3.1",
+      "cuda": "12.1.1.009",
+      "cudf": "23.04.00",
+      "cudnn": "8.9.2.26",
+      "cufft": "11.0.2.54",
+      "curand": "10.3.2.106",
+      "cusolver": "11.4.5.107",
+      "cusparse": "12.1.0.106",
+      "cutensor": "1.7.0.1",
+      "dgx_system": "* DGX-1\n* DGX-2\n* DGX A100\n* DGX Station",
+      "distributed_embeddings": "Not applicable",
+      "gpu_model": "* `NVIDIA Ampere GPU Architecture `_\n* `Turing `_\n* `Volta `_\n* `Pascal `_",
+      "hugectr": "Not applicable",
+      "hugectr2onnx": "Not applicable",
+      "merlin.core": "23.8.0",
+      "merlin.dataloader": "23.8.0",
+      "merlin.models": "23.8.0",
+      "merlin.systems": "23.8.0",
+      "nvidia_driver": "NVIDIA Driver version 465.19.01\nor later is required. However,\nif you're running on Data Center\nGPUs (formerly Tesla) such as T4,\nyou can use any of the following\nNVIDIA Driver versions:\n\n* 418.40 (or later R418)\n* 440.33 (or later R440)\n* 450.51 (or later R450)\n* 460.27 (or later R460)\n\n**Note**: The CUDA Driver\nCompatibility Package does not\nsupport all drivers.",
+      "nvidia_pytorch": "Not applicable",
+      "nvidia_tensorflow": "Not applicable",
+      "nvtabular": "23.8.0",
+      "openmpi": "4.1.5rc2",
+      "os": "Ubuntu 22.04.2 LTS",
+      "python_major": "3",
+      "pytorch": "Not applicable",
+      "release": "23.08",
+      "rmm": "23.04.00",
+      "size": "528.47 GB",
+      "sm": "Not applicable",
+      "sparse_operation_kit": "1.2.0",
+      "tensorrt": "8.6.1.6+cuda12.0.1.011",
+      "tf": "2.12.0",
+      "timestamp_utc": "2023-08-31T07:27:48.736584",
+      "transformers4rec": "23.8.0",
+      "triton": "2.35.0"
     }
   },
   "nvcr.io/nvidia/merlin/merlin-tensorflow-inference": {