From 5785e44256b757263879580c82cb84adc85bcf5a Mon Sep 17 00:00:00 2001
From: Pradnya Khalate <148914294+khalatepradnya@users.noreply.github.com>
Date: Wed, 11 Dec 2024 15:08:39 -0800
Subject: [PATCH 1/5] Fixes for Python notebooks (#2472)

Follow-up to PR# 2455 and PR# 2467

* Fix for invalid notebook - hadamard_test.ipynb
* Remove explicit setting of target (default target is nvidia if GPU(s) present) - digitized_counterdiabatic_qaoa.ipynb

Addresses CI failures in the image validation step.

Signed-off-by: Pradnya Khalate
---
 .../python/digitized_counterdiabatic_qaoa.ipynb     | 9 +++------
 docs/sphinx/applications/python/hadamard_test.ipynb | 2 +-
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/docs/sphinx/applications/python/digitized_counterdiabatic_qaoa.ipynb b/docs/sphinx/applications/python/digitized_counterdiabatic_qaoa.ipynb
index 0ac3df0ca5..a3043b7a1b 100644
--- a/docs/sphinx/applications/python/digitized_counterdiabatic_qaoa.ipynb
+++ b/docs/sphinx/applications/python/digitized_counterdiabatic_qaoa.ipynb
@@ -44,16 +44,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
    "import cudaq\n",
    "from cudaq import spin\n",
-    "import numpy as np\n",
-    "\n",
-    "cudaq.set_target('nvidia')\n",
-    "# cudaq.set_target('qpp-cpu') # Uncomment this line if no GPUs are available"
+    "import numpy as np\n"
    ]
   },
   {
@@ -65,7 +62,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
    {
diff --git a/docs/sphinx/applications/python/hadamard_test.ipynb b/docs/sphinx/applications/python/hadamard_test.ipynb
index a643f59ba3..1d1f781d59 100644
--- a/docs/sphinx/applications/python/hadamard_test.ipynb
+++ b/docs/sphinx/applications/python/hadamard_test.ipynb
@@ -37,7 +37,7 @@
    "![Htest2](./images/htestfactored.png)\n",
    "\n",
    "By preparing this circuit, and repeatedly measuring the ancilla qubit, we estimate the expectation value as $$P(0)-P(1) = Re \bra{\psi} O \ket{\phi}.$$\n",
-    "\,
+    "\n",
    "\n",
    "The following sections demonstrate how this can be performed in CUDA-Q."
    ]

From e790a98fde7790dfac2a30bb9308c34c8c8bb22f Mon Sep 17 00:00:00 2001
From: Thien Nguyen <58006629+1tnguyen@users.noreply.github.com>
Date: Thu, 12 Dec 2024 16:38:51 +1100
Subject: [PATCH 2/5] Fix uninitialized memory issue for the result buffer in kron (#2475)

Signed-off-by: Thien Nguyen
---
 runtime/nvqir/custatevec/CuStateVecCircuitSimulator.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/runtime/nvqir/custatevec/CuStateVecCircuitSimulator.cpp b/runtime/nvqir/custatevec/CuStateVecCircuitSimulator.cpp
index 3f4d09bf76..5f4b2f4801 100644
--- a/runtime/nvqir/custatevec/CuStateVecCircuitSimulator.cpp
+++ b/runtime/nvqir/custatevec/CuStateVecCircuitSimulator.cpp
@@ -216,7 +216,8 @@ class CuStateVecCircuitSimulator
     void *newDeviceStateVector;
     HANDLE_CUDA_ERROR(cudaMalloc((void **)&newDeviceStateVector,
                                  stateDimension * sizeof(CudaDataType)));
-
+    HANDLE_CUDA_ERROR(cudaMemset(newDeviceStateVector, 0,
+                                 stateDimension * sizeof(CudaDataType)));
     // Place the state data on device. Could be that
    // we just need the zero state, or the user could have provided one
     void *otherState;
@@ -283,6 +284,8 @@ class CuStateVecCircuitSimulator
     void *newDeviceStateVector;
     HANDLE_CUDA_ERROR(cudaMalloc((void **)&newDeviceStateVector,
                                  stateDimension * sizeof(CudaDataType)));
+    HANDLE_CUDA_ERROR(cudaMemset(newDeviceStateVector, 0,
+                                 stateDimension * sizeof(CudaDataType)));
     constexpr int32_t threads_per_block = 256;
     uint32_t n_blocks =
         (stateDimension + threads_per_block - 1) / threads_per_block;

From aae02de7da304fd1d83b121aad860d3baa28f6ca Mon Sep 17 00:00:00 2001
From: Bettina Heim
Date: Thu, 12 Dec 2024 11:23:39 +0100
Subject: [PATCH 3/5] Updating docs version index (#2470)

Signed-off-by: Bettina Heim
---
 .github/workflows/docker_images.yml          |  2 +-
 .../main_divisive_clustering.py              |  2 +-
 .../applications/python/vqe_advanced.ipynb   |  2 +-
 .../examples/python/building_kernels.ipynb   |  2 +-
 .../examples/python/building_kernels.py      |  4 ++-
 .../examples/python/executing_kernels.ipynb  |  2 +-
 docs/sphinx/releases.rst                     | 31 ++++++++++++++++---
 7 files changed, 35 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/docker_images.yml b/.github/workflows/docker_images.yml
index 5b4717c570..0e62b2a302 100644
--- a/.github/workflows/docker_images.yml
+++ b/.github/workflows/docker_images.yml
@@ -694,7 +694,7 @@ jobs:
           fi
           image_tag=`docker inspect $cudaq_image --format='{{json .Config.Labels}}' | jq -r '."org.opencontainers.image.version"'`
-          docs_version="CUDA_QUANTUM_VERSION=${image_tag%-base}"
+          docs_version="CUDA_QUANTUM_VERSION=$(echo $image_tag | sed -re 's/^(cu[0-9]+-)?(.*)-base$/\2/')"
           docker image rm $cudaq_image
           docker image prune --force
diff --git a/docs/sphinx/applications/python/divisive_clustering_src/main_divisive_clustering.py b/docs/sphinx/applications/python/divisive_clustering_src/main_divisive_clustering.py
index 6ada0c0cde..4065d93755 100644
--- a/docs/sphinx/applications/python/divisive_clustering_src/main_divisive_clustering.py
+++ b/docs/sphinx/applications/python/divisive_clustering_src/main_divisive_clustering.py
@@ -27,7 +27,7 @@
     type=str,
     choices=["qpp-cpu", "nvidia", "nvidia-mgpu"],
     help=
-    "Quantum simulator backend. Default is qpp-cpu. See https://nvidia.github.io/cuda-quantum/0.6.0/using/simulators.html for more options.",
+    "Quantum simulator backend. Default is qpp-cpu. See https://nvidia.github.io/cuda-quantum for more options.",
 )
 argparser.add_argument(
     "-d",
diff --git a/docs/sphinx/applications/python/vqe_advanced.ipynb b/docs/sphinx/applications/python/vqe_advanced.ipynb
index 188df3af45..3135b93829 100644
--- a/docs/sphinx/applications/python/vqe_advanced.ipynb
+++ b/docs/sphinx/applications/python/vqe_advanced.ipynb
@@ -331,7 +331,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[\u001b[38;2;255;000;000mwarning\u001b[0m] Target \u001b[38;2;000;000;255mnvidia-mqpu\u001b[0m: \u001b[38;2;000;000;255mThis target is deprecating. Please use the 'nvidia' target with option 'mqpu,fp32' or 'mqpu' (fp32 is the default precision option) by adding the command line option '--target-option mqpu,fp32' or passing it as cudaq.set_target('nvidia', option='mqpu,fp32') in Python. Please refer to CUDA-Q \u001b]8;;https://nvidia.github.io/cuda-quantum/latest/using/backends/platform.html#nvidia-mqpu-platform\u001b\\documentation\u001b]8;;\u001b\\ for more information.\u001b[0m\n"
+      "[\u001b[38;2;255;000;000mwarning\u001b[0m] Target \u001b[38;2;000;000;255mnvidia-mqpu\u001b[0m: \u001b[38;2;000;000;255mThis target is deprecating. Please use the 'nvidia' target with option 'mqpu,fp32' or 'mqpu' (fp32 is the default precision option) by adding the command line option '--target-option mqpu,fp32' or passing it as cudaq.set_target('nvidia', option='mqpu,fp32') in Python. Please refer to CUDA-Q \u001b]8;;https://nvidia.github.io/cuda-quantum/latest/using/backends/platform\u001b\\documentation\u001b]8;;\u001b\\ for more information.\u001b[0m\n"
      ]
     }
    ],
diff --git a/docs/sphinx/examples/python/building_kernels.ipynb b/docs/sphinx/examples/python/building_kernels.ipynb
index 366b9f626f..b847b4936e 100644
--- a/docs/sphinx/examples/python/building_kernels.ipynb
+++ b/docs/sphinx/examples/python/building_kernels.ipynb
@@ -145,7 +145,7 @@
    "### Applying Gates\n",
    "\n",
    "\n",
-    "After a kernel is constructed, gates can be applied to start building out a quantum circuit. All the predefined gates in CUDA-Q can be found [here](https://nvidia.github.io/cuda-quantum/latest/api/default_ops.html#unitary-operations-on-qubits).\n",
+    "After a kernel is constructed, gates can be applied to start building out a quantum circuit. All the predefined gates in CUDA-Q can be found [here](https://nvidia.github.io/cuda-quantum/latest/api/default_ops).\n",
    "\n",
    "\n",
    "Gates can be applied to all qubits in a register:"
diff --git a/docs/sphinx/examples/python/building_kernels.py b/docs/sphinx/examples/python/building_kernels.py
index 2228d1045b..35583fbb9b 100644
--- a/docs/sphinx/examples/python/building_kernels.py
+++ b/docs/sphinx/examples/python/building_kernels.py
@@ -100,7 +100,9 @@ def kernel(state: cudaq.State):
 # ### Applying Gates
 #
 #
-# After a kernel is constructed, gates can be applied to start building out a quantum circuit. All the predefined gates in CUDA-Q can be found [here](https://nvidia.github.io/cuda-quantum/latest/api/default_ops.html#unitary-operations-on-qubits).
+# After a kernel is constructed, gates can be applied to start building out a quantum circuit.
+# All the predefined gates in CUDA-Q can be found here:
+# https://nvidia.github.io/cuda-quantum/api/default_ops.
 #
 #
 # Gates can be applied to all qubits in a register:
diff --git a/docs/sphinx/examples/python/executing_kernels.ipynb b/docs/sphinx/examples/python/executing_kernels.ipynb
index 66a7afee3b..0d3250c8c1 100644
--- a/docs/sphinx/examples/python/executing_kernels.ipynb
+++ b/docs/sphinx/examples/python/executing_kernels.ipynb
@@ -78,7 +78,7 @@
    "source": [
    "Note that there is a subtle difference between how `sample` is executed with the target device set to a simulator or with the target device set to a QPU. In simulation mode, the quantum state is built once and then sampled $s$ times where $s$ equals the `shots_count`. In hardware execution mode, the quantum state collapses upon measurement and hence needs to be rebuilt over and over again.\n",
    "\n",
-    "There are a number of helpful tools that can be found in the API [here](https://nvidia.github.io/cuda-quantum/latest/api/languages/python_api.html#cudaq.SampleResult) to process the `Sample_Result` object produced by `sample`."
+    "There are a number of helpful tools that can be found in the [API docs](https://nvidia.github.io/cuda-quantum/latest/api/languages/python_api) to process the `Sample_Result` object produced by `sample`."
    ]
   },
   {
diff --git a/docs/sphinx/releases.rst b/docs/sphinx/releases.rst
index 6a8a353462..de3e12d78b 100644
--- a/docs/sphinx/releases.rst
+++ b/docs/sphinx/releases.rst
@@ -4,12 +4,35 @@ CUDA-Q Releases
 **latest**
 
-The latest version of CUDA-Q is on the main branch of our `GitHub repository `__ and is also available as a Docker image. More information about installing the nightly builds can be found :doc:`here `
+The latest version of CUDA-Q is on the main branch of our `GitHub repository `__
+and is also available as a Docker image. More information about installing the nightly builds can be found
+:doc:`here `
 
 - `Docker image (nightly builds) `__
 - `Documentation `__
 - `Examples `__
 
+**0.9.0**
+
+We are very excited to share a new toolset added for modeling and manipulating the dynamics of physical systems.
+The new API allows you to define and execute a time evolution under arbitrary operators. For more information, take
+a look at the `docs `__.
+The 0.9.0 release furthermore includes a range of contributions to add new backends to CUDA-Q, including backends
+from `Anyon Technologies `__,
+`Fermioniq `__, and
+`QuEra Computing `__,
+as well as updates to existing backends from `ORCA `__
+and `OQC `__.
+We hope you enjoy the new features - also check out our new notebooks and examples to dive into CUDA-Q.
+
+- `Docker image `__
+- `Python wheel `__
+- `C++ installer `__
+- `Documentation `__
+- `Examples `__
+
+The full change log can be found `here `__.
+
 **0.8.0**
 
 The 0.8.0 release adds a range of changes to improve the ease of use and performance with CUDA-Q.
@@ -17,7 +40,7 @@ The changes listed below highlight some of what we think will be
 the most useful to know about. While the listed changes do not capture all of the great
 contributions, we would like to extend many thanks for every contribution, in particular
 those from external contributors.
 
-- `Docker image `__
+- `Docker image `__
 - `Python wheel `__
 - `C++ installer `__
 - `Documentation `__
@@ -30,7 +53,7 @@ The full change log can be found
-`here `__.
+`here `__.
 It furthermore adds a range of bug fixes and changes the Python wheel installation instructions.
 
 - `Docker image `__
@@ -46,7 +69,7 @@ The full change log can be found `here `,
 giving you access to our most powerful GPU-accelerated simulators even if you don't have an NVIDIA GPU.
 With 0.7.0, we have furthermore greatly increased expressiveness of the Python and C++ language frontends.
-Check out our `documentation `__
+Check out our `documentation `__
 to get started with the new Python syntax support we have added, and
 `follow our blog `__ to learn more about the new setup and its performance benefits.

From 09f5e1f0d89f96c9827f3fb4f731adea01e6c21a Mon Sep 17 00:00:00 2001
From: Eric Schweitz
Date: Thu, 12 Dec 2024 10:07:10 -0800
Subject: [PATCH 4/5] Convert static variable to a constexpr. (#2471)

Make this static data structure constant for efficiency and to eliminate an
initialization ctor.

Signed-off-by: Eric Schweitz
---
 lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp b/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp
index 2ae90d302b..58526dc692 100644
--- a/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp
+++ b/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp
@@ -516,7 +516,7 @@ namespace {
 /// trivial pass only does this preparation work. It performs no analysis and
 /// does not rewrite function body's, etc.
-static const std::vector measurementFunctionNames{
+static constexpr std::array measurementFunctionNames{
     cudaq::opt::QIRMeasureBody, cudaq::opt::QIRMeasure,
     cudaq::opt::QIRMeasureToRegister};
@@ -564,7 +564,7 @@ struct QIRProfilePreparationPass
         func.getFunctionType().getParams(), module);
 
   // Apply irreversible attribute to measurement functions
-  for (auto &funcName : measurementFunctionNames) {
+  for (auto *funcName : measurementFunctionNames) {
     Operation *op = SymbolTable::lookupSymbolIn(module, funcName);
     auto funcOp = llvm::dyn_cast_if_present(op);
     if (funcOp) {

From 95134fdaeb8f94fc60833182e8af966daeb25c40 Mon Sep 17 00:00:00 2001
From: Pradnya Khalate <148914294+khalatepradnya@users.noreply.github.com>
Date: Thu, 12 Dec 2024 11:35:55 -0800
Subject: [PATCH 5/5] Enable nightly integration tests for `infleqtion` target (#2469)

Signed-off-by: Pradnya Khalate
---
 .github/workflows/integration_tests.yml | 46 +++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml
index 55d329b93f..5c5bc28300 100644
--- a/.github/workflows/integration_tests.yml
+++ b/.github/workflows/integration_tests.yml
@@ -17,6 +17,7 @@ on:
         options:
           - nightly
           - anyon
+          - infleqtion
           - ionq
           - iqm
           - oqc
@@ -650,6 +651,51 @@ jobs:
           fi
         shell: bash
 
+      - name: Submit to Infleqtion test server
+        if: (success() || failure()) && (inputs.target == 'infleqtion' || github.event_name == 'schedule' || inputs.target == 'nightly')
+        run: |
+          echo "### Submit to Infleqtion server" >> $GITHUB_STEP_SUMMARY
+          export SUPERSTAQ_API_KEY='${{ secrets.SUPERSTAQ_API_KEY }}'
+          set +e # Allow script to keep going through errors
+          test_err_sum=0
+          cpp_tests="docs/sphinx/targets/cpp/infleqtion.cpp"
+          for filename in $cpp_tests; do
+            [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
+            nvq++ --target infleqtion $filename
+            test_status=$?
+            if [ $test_status -eq 0 ]; then
+              ./a.out
+              test_status=$?
+              if [ $test_status -eq 0 ]; then
+                echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
+              else
+                echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
+                test_err_sum=$((test_err_sum+1))
+              fi
+            else
+              echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
+              test_err_sum=$((test_err_sum+1))
+            fi
+          done
+          python_tests="docs/sphinx/targets/python/infleqtion.py"
+          for filename in $python_tests; do
+            [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
+            python3 $filename 1> /dev/null
+            test_status=$?
+            if [ $test_status -eq 0 ]; then
+              echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
+            else
+              echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
+              test_err_sum=$((test_err_sum+1))
+            fi
+          done
+          set -e # Re-enable exit code error checking
+          if [ ! $test_err_sum -eq 0 ]; then
+            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
+            exit 1
+          fi
+        shell: bash
+
       - name: Submit to ${{ inputs.target }}
         # The full set of tests used by this step is currently only supported on
         # Quantinuum. The other supported tests are tested by the step above.