From cf2f7950f0be822fd187fbf40ec8f03c2dcf6805 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Thu, 26 Oct 2023 17:44:02 +0300 Subject: [PATCH 01/22] Disallow unsupported options (#1108) * Disallow unsupported options * Moved checking of unsupported options to 'flexible' decorator * Modified the test to give TypeError where needed * Removed empty newline * Moved tests from test_ibm_primitives to test_options, because they don't require a primitive * typo * Release note * black and lint * black again * Fixed test failing in CI * Removed _flexible decorator. Moved _post_init into Options class * lint * lint * Fixed bug * lint --------- Co-authored-by: Kevin Tian --- qiskit_ibm_runtime/accounts/account.py | 2 +- .../options/environment_options.py | 3 - .../options/execution_options.py | 2 - qiskit_ibm_runtime/options/options.py | 12 ++- .../options/resilience_options.py | 3 - .../options/simulator_options.py | 9 +-- .../options/transpilation_options.py | 2 - qiskit_ibm_runtime/options/utils.py | 44 ----------- ...emove_kwargs_options-9024d3ec6572a53e.yaml | 4 + test/unit/test_ibm_primitives.py | 43 ---------- test/unit/test_options.py | 79 ++++++------------- 11 files changed, 44 insertions(+), 159 deletions(-) create mode 100644 releasenotes/notes/remove_kwargs_options-9024d3ec6572a53e.yaml diff --git a/qiskit_ibm_runtime/accounts/account.py b/qiskit_ibm_runtime/accounts/account.py index 7e71b95dc..2e5ed6bbb 100644 --- a/qiskit_ibm_runtime/accounts/account.py +++ b/qiskit_ibm_runtime/accounts/account.py @@ -182,7 +182,7 @@ def _assert_valid_channel(channel: ChannelType) -> None: if not (channel in ["ibm_cloud", "ibm_quantum"]): raise InvalidAccountError( f"Invalid `channel` value. Expected one of " - f"{['ibm_cloud', 'ibm_quantum']}, got '{channel}'." + f"['ibm_cloud', 'ibm_quantum'], got '{channel}'." 
) @staticmethod diff --git a/qiskit_ibm_runtime/options/environment_options.py b/qiskit_ibm_runtime/options/environment_options.py index becd2cdb4..ebb183adf 100644 --- a/qiskit_ibm_runtime/options/environment_options.py +++ b/qiskit_ibm_runtime/options/environment_options.py @@ -15,8 +15,6 @@ from typing import Optional, Callable, List, Literal, get_args from dataclasses import dataclass, field -from .utils import _flexible - LogLevelType = Literal[ "DEBUG", "INFO", @@ -26,7 +24,6 @@ ] -@_flexible @dataclass class EnvironmentOptions: """Options related to the execution environment. diff --git a/qiskit_ibm_runtime/options/execution_options.py b/qiskit_ibm_runtime/options/execution_options.py index 01022f7d7..a8b9196d1 100644 --- a/qiskit_ibm_runtime/options/execution_options.py +++ b/qiskit_ibm_runtime/options/execution_options.py @@ -15,7 +15,6 @@ from dataclasses import dataclass from typing import Literal, get_args -from .utils import _flexible ExecutionSupportedOptions = Literal[ "shots", @@ -23,7 +22,6 @@ ] -@_flexible @dataclass class ExecutionOptions: """Execution options. diff --git a/qiskit_ibm_runtime/options/options.py b/qiskit_ibm_runtime/options/options.py index f69016562..d5ae665de 100644 --- a/qiskit_ibm_runtime/options/options.py +++ b/qiskit_ibm_runtime/options/options.py @@ -19,7 +19,8 @@ from qiskit.transpiler import CouplingMap -from .utils import _flexible, Dict +from .utils import Dict, _to_obj + from .environment_options import EnvironmentOptions from .execution_options import ExecutionOptions from .simulator_options import SimulatorOptions @@ -28,7 +29,6 @@ from ..runtime_options import RuntimeOptions -@_flexible @dataclass class Options: """Options for the primitives. 
@@ -113,6 +113,14 @@ class Options: "resilience": ResilienceOptions, } + def __post_init__(self): # type: ignore + """Convert dictionary fields to object.""" + obj_fields = getattr(self, "_obj_fields", {}) + for key in list(obj_fields): + if hasattr(self, key): + orig_val = getattr(self, key) + setattr(self, key, _to_obj(obj_fields[key], orig_val)) + @staticmethod def _get_program_inputs(options: dict) -> dict: """Convert the input options to program compatible inputs. diff --git a/qiskit_ibm_runtime/options/resilience_options.py b/qiskit_ibm_runtime/options/resilience_options.py index c15638c1a..3499c56ac 100644 --- a/qiskit_ibm_runtime/options/resilience_options.py +++ b/qiskit_ibm_runtime/options/resilience_options.py @@ -15,8 +15,6 @@ from typing import Sequence, Literal, get_args from dataclasses import dataclass -from .utils import _flexible - ResilienceSupportedOptions = Literal[ "noise_amplifier", "noise_factors", @@ -33,7 +31,6 @@ ] -@_flexible @dataclass class ResilienceOptions: """Resilience options. diff --git a/qiskit_ibm_runtime/options/simulator_options.py b/qiskit_ibm_runtime/options/simulator_options.py index 85847282e..5506e4845 100644 --- a/qiskit_ibm_runtime/options/simulator_options.py +++ b/qiskit_ibm_runtime/options/simulator_options.py @@ -19,10 +19,7 @@ from qiskit.exceptions import MissingOptionalLibraryError from qiskit.providers import BackendV1, BackendV2 from qiskit.utils import optionals - -from qiskit.transpiler import CouplingMap - -from .utils import _flexible +from qiskit.transpiler import CouplingMap # pylint: disable=unused-import if TYPE_CHECKING: import qiskit_aer @@ -35,7 +32,6 @@ ] -@_flexible @dataclass() class SimulatorOptions: """Simulator options. @@ -78,6 +74,9 @@ def set_backend(self, backend: Union[BackendV1, BackendV2]) -> None: Args: backend: backend to be set. + + Raises: + MissingOptionalLibraryError if qiskit-aer is not found. 
""" if not optionals.HAS_AER: raise MissingOptionalLibraryError( diff --git a/qiskit_ibm_runtime/options/transpilation_options.py b/qiskit_ibm_runtime/options/transpilation_options.py index 90200f8ea..fb3e96ae6 100644 --- a/qiskit_ibm_runtime/options/transpilation_options.py +++ b/qiskit_ibm_runtime/options/transpilation_options.py @@ -15,7 +15,6 @@ from typing import Optional, List, Union, Literal, get_args from dataclasses import dataclass -from .utils import _flexible TranspilationSupportedOptions = Literal[ "skip_transpilation", @@ -39,7 +38,6 @@ ] -@_flexible @dataclass class TranspilationOptions: """Transpilation options. diff --git a/qiskit_ibm_runtime/options/utils.py b/qiskit_ibm_runtime/options/utils.py index 78cd0ad21..3a744f5b5 100644 --- a/qiskit_ibm_runtime/options/utils.py +++ b/qiskit_ibm_runtime/options/utils.py @@ -12,7 +12,6 @@ """Utility functions for options.""" -from dataclasses import fields, field, make_dataclass from ..ibm_backend import IBMBackend @@ -65,49 +64,6 @@ def _to_obj(cls_, data): # type: ignore ) -def _post_init(self): # type: ignore - """Convert dictionary fields to object.""" - - obj_fields = getattr(self, "_obj_fields", {}) - for key in obj_fields.keys(): - if hasattr(self, key): - orig_val = getattr(self, key) - setattr(self, key, _to_obj(obj_fields[key], orig_val)) - - -def _flexible(cls): # type: ignore - """Decorator used to allow a flexible dataclass. - - This is used to dynamically create a new dataclass with the - arbitrary kwargs input converted to fields. It also converts - input dictionary to objects based on the _obj_fields attribute. 
- """ - - def __new__(cls, *_, **kwargs): # type: ignore - all_fields = [] - orig_field_names = set() - - for fld in fields(cls): - all_fields.append((fld.name, fld.type, fld)) - orig_field_names.add(fld.name) - - for key, val in kwargs.items(): - if key not in orig_field_names: - all_fields.append((key, type(val), field(default=None))) - - new_cls = make_dataclass( - cls.__name__, - all_fields, - bases=(cls,), - namespace={"__post_init__": _post_init}, - ) - obj = object.__new__(new_cls) - return obj - - cls.__new__ = __new__ - return cls - - class Dict: """Fake Dict type. diff --git a/releasenotes/notes/remove_kwargs_options-9024d3ec6572a53e.yaml b/releasenotes/notes/remove_kwargs_options-9024d3ec6572a53e.yaml new file mode 100644 index 000000000..e40a4f40c --- /dev/null +++ b/releasenotes/notes/remove_kwargs_options-9024d3ec6572a53e.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Arbitrary keys and values are no longer allowed in ``Options``. diff --git a/test/unit/test_ibm_primitives.py b/test/unit/test_ibm_primitives.py index 4044acd67..9314e2a15 100644 --- a/test/unit/test_ibm_primitives.py +++ b/test/unit/test_ibm_primitives.py @@ -16,7 +16,6 @@ import copy import os from unittest.mock import MagicMock, patch -import warnings from dataclasses import asdict from typing import Dict @@ -85,28 +84,6 @@ def test_dict_options(self): self._update_dict(expected, copy.deepcopy(options)) self.assertDictEqual(expected, inst.options.__dict__) - def test_backend_in_options(self): - """Test specifying backend in options.""" - primitives = [Sampler, Estimator] - backend_name = "ibm_gotham" - backend = MagicMock(spec=IBMBackend) - backend._instance = None - backend.name = backend_name - backends = [backend_name, backend] - for cls in primitives: - for backend in backends: - with self.subTest(primitive=cls, backend=backend): - options = {"backend": backend} - with warnings.catch_warnings(record=True) as warn: - warnings.simplefilter("always") - 
cls(session=MagicMock(spec=MockSession), options=options) - self.assertTrue( - all( - issubclass(one_warn.category, DeprecationWarning) - for one_warn in warn - ) - ) - def test_runtime_options(self): """Test RuntimeOptions specified as primitive options.""" session = MagicMock(spec=MockSession) @@ -415,22 +392,6 @@ def test_run_overwrite_runtime_options(self): rt_options = kwargs["options"] self._assert_dict_partially_equal(rt_options, options) - def test_kwarg_options(self): - """Test specifying arbitrary options.""" - session = MagicMock(spec=MockSession) - primitives = [Sampler, Estimator] - for cls in primitives: - with self.subTest(primitive=cls): - options = Options(foo="foo") # pylint: disable=unexpected-keyword-arg - inst = cls(session=session, options=options) - inst.run(self.qx, observables=self.obs) - if sys.version_info >= (3, 8): - inputs = session.run.call_args.kwargs["inputs"] - else: - _, kwargs = session.run.call_args - inputs = kwargs["inputs"] - self.assertEqual(inputs.get("foo"), "foo") - def test_run_kwarg_options(self): """Test specifying arbitrary options in run.""" session = MagicMock(spec=MockSession) @@ -477,10 +438,6 @@ def test_set_options(self): new_options = [ ({"optimization_level": 2}, Options()), ({"optimization_level": 3, "shots": 200}, Options()), - ( - {"shots": 300, "foo": "foo"}, - Options(foo="foo"), # pylint: disable=unexpected-keyword-arg - ), ] session = MagicMock(spec=MockSession) diff --git a/test/unit/test_options.py b/test/unit/test_options.py index ba39839de..572bd0950 100644 --- a/test/unit/test_options.py +++ b/test/unit/test_options.py @@ -12,7 +12,6 @@ """Tests for Options class.""" -import warnings from dataclasses import asdict from ddt import data, ddt @@ -62,51 +61,6 @@ def test_merge_options(self): f"options={options}, combined={combined}", ) - def test_merge_options_extra_fields(self): - """Test merging options with extra fields.""" - options_vars = [ - ( - { - "initial_layout": [2, 3], - "transpilation": 
{"layout_method": "trivial"}, - "foo": "foo", - }, - Options(foo="foo"), # pylint: disable=unexpected-keyword-arg - ), - ( - { - "initial_layout": [3, 4], - "transpilation": {"layout_method": "dense", "bar": "bar"}, - }, - Options(transpilation={"bar": "bar"}), - ), - ( - { - "initial_layout": [1, 2], - "foo": "foo", - "transpilation": {"layout_method": "dense", "foo": "foo"}, - }, - Options( # pylint: disable=unexpected-keyword-arg - foo="foo", transpilation={"foo": "foo"} - ), - ), - ] - for new_ops, expected in options_vars: - with self.subTest(new_ops=new_ops): - options = Options() - combined = Options._merge_options(asdict(options), new_ops) - - # Make sure the values are equal. - self.assertTrue( - flat_dict_partially_equal(combined, new_ops), - f"new_ops={new_ops}, combined={combined}", - ) - # Make sure the structure didn't change. - self.assertTrue( - dict_keys_equal(combined, asdict(expected)), - f"expected={expected}, combined={combined}", - ) - def test_runtime_options(self): """Test converting runtime options.""" full_options = RuntimeOptions( @@ -137,14 +91,8 @@ def test_program_inputs(self): environment={"log_level": "DEBUG"}, simulator={"noise_model": noise_model}, resilience={"noise_factors": (0, 2, 4)}, - foo="foo", - bar="bar", ) - - with warnings.catch_warnings(record=True) as warn: - warnings.simplefilter("always") - inputs = Options._get_program_inputs(asdict(options)) - self.assertEqual(len(warn), 2) + inputs = Options._get_program_inputs(asdict(options)) expected = { "run_options": {"shots": 100, "noise_model": noise_model}, @@ -157,7 +105,6 @@ def test_program_inputs(self): "level": 2, "noise_factors": (0, 2, 4), }, - "foo": "foo", } self.assertTrue( dict_paritally_equal(inputs, expected), @@ -191,6 +138,30 @@ def test_init_options_with_dictionary(self): # Make sure the structure didn't change. 
self.assertTrue(dict_keys_equal(asdict(Options()), options), f"options={options}") + def test_kwargs_options(self): + """Test specifying arbitrary options.""" + with self.assertRaises(TypeError) as exc: + _ = Options(foo="foo") # pylint: disable=unexpected-keyword-arg + self.assertIn( + "__init__() got an unexpected keyword argument 'foo'", + str(exc.exception), + ) + + def test_backend_in_options(self): + """Test specifying backend in options.""" + backend_name = "ibm_gotham" + backend = FakeManila() + backend._instance = None + backend.name = backend_name + backends = [backend_name, backend] + for backend in backends: + with self.assertRaises(TypeError) as exc: + _ = Options(backend=backend) # pylint: disable=unexpected-keyword-arg + self.assertIn( + "__init__() got an unexpected keyword argument 'backend'", + str(exc.exception), + ) + def test_unsupported_options(self): """Test error on unsupported second level options""" # defining minimal dict of options From ab408ddbde4ac081bd14ee61bc70a8fd377908c2 Mon Sep 17 00:00:00 2001 From: Frank Harkins Date: Fri, 27 Oct 2023 18:16:34 +0100 Subject: [PATCH 02/22] Update Vale rules (#1177) * Update Vale rules * Don't block on latin abbreviations * Copy over latest changes --------- Co-authored-by: Rebecca Dimock <66339736+beckykd@users.noreply.github.com> --- docs/.vale.ini | 1 + test/docs/IBMQuantum/Abbreviations.yml | 2 +- test/docs/IBMQuantum/Accessibility.yml | 15 ++ test/docs/IBMQuantum/BestPractice.yml | 20 ++ test/docs/IBMQuantum/CommonTypos.yml | 12 + test/docs/IBMQuantum/Contractions.yml | 5 + test/docs/IBMQuantum/DashSpacing.yml | 2 +- test/docs/IBMQuantum/Definitions.yml | 2 +- test/docs/IBMQuantum/Headings.yml | 2 +- test/docs/IBMQuantum/Latin.yml | 6 +- test/docs/IBMQuantum/Links.yml | 13 ++ test/docs/IBMQuantum/ListPunctuation.yml | 7 + test/docs/IBMQuantum/OxfordComma.yml | 2 +- test/docs/IBMQuantum/Recommend.yml | 12 + test/docs/IBMQuantum/Terms.yml | 275 +++++++++-------------- 
test/docs/IBMQuantum/UIConsistency.yml | 29 +++ test/docs/IBMQuantum/Wordiness.yml | 29 +++ test/docs/dictionary.txt | 3 + 18 files changed, 256 insertions(+), 181 deletions(-) create mode 100644 test/docs/IBMQuantum/Accessibility.yml create mode 100644 test/docs/IBMQuantum/BestPractice.yml create mode 100644 test/docs/IBMQuantum/CommonTypos.yml create mode 100644 test/docs/IBMQuantum/Contractions.yml create mode 100644 test/docs/IBMQuantum/Links.yml create mode 100644 test/docs/IBMQuantum/ListPunctuation.yml create mode 100644 test/docs/IBMQuantum/Recommend.yml create mode 100644 test/docs/IBMQuantum/UIConsistency.yml create mode 100644 test/docs/IBMQuantum/Wordiness.yml diff --git a/docs/.vale.ini b/docs/.vale.ini index 13557c9fc..eb9555aa7 100644 --- a/docs/.vale.ini +++ b/docs/.vale.ini @@ -3,6 +3,7 @@ MinAlertLevel = suggestion [[!_]**.{md,rst}] BasedOnStyles = IBMQuantum +IBMQuantum.Latin = warning [apidocs/ibm-runtime.rst] IBMQuantum.Headings = NO diff --git a/test/docs/IBMQuantum/Abbreviations.yml b/test/docs/IBMQuantum/Abbreviations.yml index e7b85f235..0ffeb06d5 100644 --- a/test/docs/IBMQuantum/Abbreviations.yml +++ b/test/docs/IBMQuantum/Abbreviations.yml @@ -1,6 +1,6 @@ extends: existence message: "Do not use periods in all-uppercase abbreviations such as '%s'." -link: 'https://www.ibm.com/developerworks/library/styleguidelines/index.html#N100DC' +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=punctuation-periods#abbreviations' level: error nonword: true tokens: diff --git a/test/docs/IBMQuantum/Accessibility.yml b/test/docs/IBMQuantum/Accessibility.yml new file mode 100644 index 000000000..f935a3cc2 --- /dev/null +++ b/test/docs/IBMQuantum/Accessibility.yml @@ -0,0 +1,15 @@ +extends: substitution +message: Use '%s' rather than '%s' if referring to position in a document (for screen readers). 
+link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=word-usage' +level: warning +ignorecase: true +action: + name: replace +# swap maps tokens in form of bad: good +swap: + 'down(?:-)?level': earlier|previous|not at the latest level + 'down(?:-)?level': earlier|previous|not at the latest level + above: previous + below: following + bottom: end|last + top: start|beginning|first diff --git a/test/docs/IBMQuantum/BestPractice.yml b/test/docs/IBMQuantum/BestPractice.yml new file mode 100644 index 000000000..4949e8044 --- /dev/null +++ b/test/docs/IBMQuantum/BestPractice.yml @@ -0,0 +1,20 @@ +extends: existence +message: "Don't use '%s'" +link: 'https://github.com/IBM/ibm-quantum-style-guide/issues/22' +ignorecase: true +scope: sentence +level: warning +tokens: + - allows you to + - easy + - enable + - enables + - end user + - end users + - in the future + - that's all there is to it + - very + - we encourage you + - we understand that + - you can see + - you can think of this diff --git a/test/docs/IBMQuantum/CommonTypos.yml b/test/docs/IBMQuantum/CommonTypos.yml new file mode 100644 index 000000000..67ed08e1e --- /dev/null +++ b/test/docs/IBMQuantum/CommonTypos.yml @@ -0,0 +1,12 @@ +# For words that are probably typos (in quantum computing), but aren't caught +# by the spell check +extends: substitution +message: "Did you mean '%s' instead of '%s'?" +ignorecase: true +level: warning +action: + name: replace +swap: + Shore: Shor + Grove: Grover + open source: open-source diff --git a/test/docs/IBMQuantum/Contractions.yml b/test/docs/IBMQuantum/Contractions.yml new file mode 100644 index 000000000..5d697336c --- /dev/null +++ b/test/docs/IBMQuantum/Contractions.yml @@ -0,0 +1,5 @@ +extends: existence +message: "Remove the hyphen in '%s'." 
+level: suggestion +tokens: + - 'non-(\w+)' diff --git a/test/docs/IBMQuantum/DashSpacing.yml b/test/docs/IBMQuantum/DashSpacing.yml index ea9a103b7..40077d81f 100644 --- a/test/docs/IBMQuantum/DashSpacing.yml +++ b/test/docs/IBMQuantum/DashSpacing.yml @@ -1,6 +1,6 @@ extends: existence message: "Add spaces around the dash in '%s'." -link: 'https://www.ibm.com/developerworks/library/styleguidelines/index.html#N106BF' +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=punctuation-dashes' ignorecase: true nonword: true level: error diff --git a/test/docs/IBMQuantum/Definitions.yml b/test/docs/IBMQuantum/Definitions.yml index b60b88851..c32adea14 100644 --- a/test/docs/IBMQuantum/Definitions.yml +++ b/test/docs/IBMQuantum/Definitions.yml @@ -1,6 +1,6 @@ extends: conditional message: "Define acronyms and abbreviations (such as '%s') on first occurrence if they're likely to be unfamiliar." -link: 'https://www.ibm.com/developerworks/library/styleguidelines/index.html#N100DC' +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=grammar-abbreviations#general-guidelines' level: suggestion ignorecase: false # Ensures that the existence of 'first' implies the existence of 'second'. diff --git a/test/docs/IBMQuantum/Headings.yml b/test/docs/IBMQuantum/Headings.yml index cd0f4f770..57f2f7fc5 100644 --- a/test/docs/IBMQuantum/Headings.yml +++ b/test/docs/IBMQuantum/Headings.yml @@ -1,6 +1,6 @@ extends: capitalization message: "'%s' should use sentence-style capitalization." 
-link: 'https://www.ibm.com/developerworks/library/styleguidelines/index.html#N1030C' +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=grammar-capitalization#headings-titles-and-banners' level: warning scope: heading match: $sentence diff --git a/test/docs/IBMQuantum/Latin.yml b/test/docs/IBMQuantum/Latin.yml index b0d2b2b1c..68596d917 100644 --- a/test/docs/IBMQuantum/Latin.yml +++ b/test/docs/IBMQuantum/Latin.yml @@ -1,6 +1,6 @@ extends: substitution message: "Use '%s' instead of '%s'." -link: 'https://www.ibm.com/developerworks/library/styleguidelines/index.html#wordlist' +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=word-usage' ignorecase: true level: error nonword: true @@ -10,4 +10,6 @@ swap: '\b(?:eg|e\.g\.)[\s,]': for example '\b(?:ie|i\.e\.)[\s,]': that is '\betc\.': and so on - '\bvs\.': versus + '\bvs\.': compared to + '\bvia\b': through + '\bversus\b': compared to diff --git a/test/docs/IBMQuantum/Links.yml b/test/docs/IBMQuantum/Links.yml new file mode 100644 index 000000000..b429b6ff9 --- /dev/null +++ b/test/docs/IBMQuantum/Links.yml @@ -0,0 +1,13 @@ +extends: existence +message: Link names should make sense without context; change "%s" to something more descriptive. 
+level: suggestion +scope: raw +ignorecase: true +tokens: + # Manually match markdown / rst links + - '(?<=\[)(click )?here(?=\](\s*?)\()' + - '(?<=`)(click )?here(?=((\s*?)<(.+?)>)?`_)' + - '(?<=\[)(read )?more( information)?(?=\](\s*?)\()' + - '(?<=`)(read )?more( information)?(?=((\s*?)<(.+?)>)?`_)' + - '(?<=\[)(this )?(link|page)(?=\](\s*?)\()' + - '(?<=`)(this )?(link|page)(?=((\s*?)<(.+?)>)?`_)' diff --git a/test/docs/IBMQuantum/ListPunctuation.yml b/test/docs/IBMQuantum/ListPunctuation.yml new file mode 100644 index 000000000..c950c908b --- /dev/null +++ b/test/docs/IBMQuantum/ListPunctuation.yml @@ -0,0 +1,7 @@ +extends: existence +message: "Remove commas at end of list items (…%s)" +link: "https://github.com/IBM/ibm-quantum-style-guide/issues/22" +level: warning +scope: list +raw: + - '(\w*?),(\s*?)$' diff --git a/test/docs/IBMQuantum/OxfordComma.yml b/test/docs/IBMQuantum/OxfordComma.yml index 6e7f76c7a..9151c2892 100644 --- a/test/docs/IBMQuantum/OxfordComma.yml +++ b/test/docs/IBMQuantum/OxfordComma.yml @@ -1,6 +1,6 @@ extends: existence message: "Use the Oxford comma in '%s'." -link: 'https://www.ibm.com/developerworks/library/styleguidelines/index.html#N106BF' +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=punctuation-commas' level: suggestion tokens: - '(?:[^,]+,){1,}\s\w+\sand' diff --git a/test/docs/IBMQuantum/Recommend.yml b/test/docs/IBMQuantum/Recommend.yml new file mode 100644 index 000000000..08415cb27 --- /dev/null +++ b/test/docs/IBMQuantum/Recommend.yml @@ -0,0 +1,12 @@ +extends: existence +message: > + Avoid '%s' if writing about performance, benefits, or future support that + could create marketing or legal problems for IBM. Instead, describe why the + action is problematic or beneficial. 
+link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=information-claims-recommendations' +ignorecase: true +scope: sentence +level: warning +tokens: + - recommend + - recommended diff --git a/test/docs/IBMQuantum/Terms.yml b/test/docs/IBMQuantum/Terms.yml index 6197c2a64..2178be6fb 100644 --- a/test/docs/IBMQuantum/Terms.yml +++ b/test/docs/IBMQuantum/Terms.yml @@ -1,182 +1,109 @@ extends: substitution -message: Use '%s' rather than '%s' -link: 'https://www.ibm.com/developerworks/library/styleguidelines/index.html#wordlist' +message: Use '%s' rather than '%s' for consistency across IBM Quantum. +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=word-usage' level: warning ignorecase: true action: name: replace # swap maps tokens in form of bad: good swap: - '(?:Ctrl|control)-click': press Ctrl and click - 'a lot(?: of)?': many|much - 'backward(?:-)?compatible': compatible with earlier versions - 'down(?:-)?level': earlier|previous|not at the latest level - 'mash(?: )?up': create - 'pop-up (?:blocker|killer)': software to block pop-up ad windows - 're(?:-)?occur': recur - 'sort(?:-|/)?merge': sort|merge + # Quantum-specific: See https://github.com/IBM/ibm-quantum-style-guide/issues?q=is%3Aissue+label%3Arequest + 'quasi(?:-)?probabilities': quasi-probability distribution + quasi-distribution: quasi-probability distribution - bottom: end|last - below: following - above: previous - top: start|beginning|first - a number of: several - abort: cancel|stop - administrate: administer - all caps: uppercase - and/or: a or b|a, b, or both - as long as: if|provided that - as per: according to|as|as in - back-level: earlier|previous|not at the latest level - Big Blue: IBM - blink: flash - blue screen of death: stop error - breadcrumbing: breadcrumb trail - canned: preplanned|preconfigured|predefined - case insensitive: not case-sensitive - catastrophic error: unrecoverable error - CBE: Common Base Event - CBTS: CICS BTS|BTS - cold boot: hardware restart - 
cold start: hardware restart - comes with: includes - componentization: component-based development|component model|component architecture|shared components - componentize: develop components - comprised of: consist of - connect with: connect to - context menu: menu|pop-up menu - contextual help: help|context-sensitive help - crash: fail|lock up|stop|stop responding - CRUD: create retrieve update and delete - customer: client - datum: data - debuggable: debug - deconfigure: unconfigure - deinstall: uninstall - deinstallation: uninstallation - demilitarized zone: DMZ - demo: demonstration - depress: press|type - deregister: unregister - desire: want|required - destroy: delete from the database - dismount: demount|unmount|remove - downgrade: upgrade|fallback|fall back|rollback|roll back - downward compatible: compatible with earlier versions - drag and drop: drag - drill up: navigate - e-fix: fix|interim fix - eFix: fix|interim fix - end user: user - end-user interface: graphical interface|interface - EUI: graphical user interface|interface - expose: display|show|make available - fill in: complete|enter|specify - fixed disk drive: hard disk drive - flavor: version|method - floppy disk: diskette|diskette drive - floppy drive: diskette|diskette drive - floppy: diskette|diskette drive - forward compatible: compatible with later versions - gzip: compress - gzipped: archive|compressed file - hard drive: hard disk|hard disk drive - hard file: hard disk|hard disk drive - hence: therefore - i-fix: interim fix - i-Fix: interim fix - IBM's: IBM's|IBM's AIX - ifix: interim fix - iFix: interim fix - in order to: to - in other words: for example|that is - in spite of: regardless of|despite - in the event: in case|if|when - inactivate: deactivate - information on: information about - information technology: IT - instead of: rather than - insure: ensure - Internet address: IP address|URL|Internet email address|web address - irrecoverable: unrecoverable - jar: compress|archive - 
keep in mind: remember - launch: start|open - left-hand: left - leverage: use - line cord: power cable|power cord - main directory: root directory - memory stick: USB flash drive - microcomputer: PC - motherboard: system board - mouse over: point to|move the mouse pointer over|Mouse|mouse over - network-centric computing: network computing - non-English: in languages other than English|non-English-language - nonrecoverable: unrecoverable - notion: concept - off-premise: on-premises|off-premises|onsite|offsite - offline storage: auxiliary storage - okay: OK - on ramp: access method - on the fly: dynamically|as needed|in real time|immediately - on the other hand: however|alternatively|conversely - on-premise: on-premises|off-premises|onsite|offsite - on-ramp: access method - pain point: challenge|concern|difficulty|issue - parent task: parent process - patch: fix|test fix|interim fix|fix pack|program temporary fix - perimeter network: DMZ - power down: turn on|turn off - power off: turn on|turn off - power on: turn on|turn off - preload: preinstall|preinstalled - preloaded: preinstall|preinstalled - prepend: add a prefix to - prior to: before - recommend: suggest - retry: retry|try again - right double-click: double right-click - right-hand: right - rule of thumb: rule - sanity check: test|evaluate - secondary storage: auxiliary storage - selection button: left mouse button - serial database: nonpartitioned database environment - shift-click: press Shift and click - ship: include|included - Simple Object Access Protocol: SOAP - single quote mark: single quotation mark - single quote: single quotation mark - SME routine: session management exit routine - start up: start - sunset: withdraw from service|withdraw from marketing|discontinue|no longer support - switch off: power on|turn on|power off|turn off - switch on: power on|turn on|power off|turn off - tar: compress|archive - tarball: tar file - terminate: end|stop - thru: through - thumbstick: USB flash drive - 
thus: therefore - toggle off: toggle - tooling: tools - touchscreen: touch-sensitive screen - transparent: indiscernible|not visible - typo: typing error|typographical error - uncheck: clear - uncompress: decompress - undeploy: remove|withdraw - unjar: extract - unselect: clear|deselect - untar: extract - unzip: unzip - upward compatible: compatible with later versions - utilize: use - versus: compared to - via: through - warning notice: attention notice - web-enable: enable for the web - webinar: webinar|webcast|web seminar|web-based event - wish: want - zero out: zero - zip: zip|compress + # General IBM: See https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=word-usage + 'backward(?:-)?compatible': compatible with earlier versions + 'mash(?: )?up': create + 'pop-up (?:blocker|killer)': software to block pop-up ad windows + 're(?:-)?occur': recur + 'sort(?:-|/)?merge': sort|merge + CRUD: create retrieve update and delete + EUI: graphical user interface|interface + abort: cancel|stop + administrate: administer + all caps: uppercase + and/or: a or b|a, b, or both + as per: according to|as|as in + case insensitive: case-insensitive + catastrophic error: unrecoverable error + comes with: includes + componentization: component-based development|component model|component architecture|shared components + componentize: develop components + crash: fail|lock up|stop|stop responding + customer: client + datum: data + debuggable: debug + deconfigure: unconfigure + deinstall: uninstall + deinstallation: uninstallation + demo: demonstration + deregister: unregister + desire: want|required + destroy: delete (data) + dismount: demount|unmount|remove + downgrade: upgrade|fallback|fall back|rollback|roll back + downward compatible: compatible with earlier versions + e-fix: fix|interim fix + eFix: fix|interim fix + end user: user + end-user interface: graphical interface|interface + expose: display|show|make available + flavor: version|method + forward compatible: compatible 
with later versions + gzip: compress + gzipped: archive|compressed file + hard drive: hard disk|hard disk drive + hard file: hard disk|hard disk drive + IBM's: IBM + i-fix: interim fix + ifix: interim fix + inactivate: deactivate + information on: information about + information technology: IT + internet address: IP address|URL|Internet email address|web address + irrecoverable: unrecoverable + jar: compress|archive + line cord: power cable|power cord + main directory: root directory + memory stick: USB flash drive + microcomputer: PC + motherboard: system board + network-centric computing: network computing + non-English: in languages other than English|non-English-language + nonrecoverable: unrecoverable + notion: concept + off-premise: on-premises|off-premises|onsite|offsite + okay: OK + on ramp: access method + on-premise: on-premises|off-premises|onsite|offsite + on-ramp: access method + parent task: parent process + patch: fix|test fix|interim fix|fix pack|program temporary fix + preload: preinstall|preinstalled + preloaded: preinstall|preinstalled + sanity check: test|evaluate + secondary storage: auxiliary storage + serial database: nonpartitioned database environment + set-up: setup + ship: include|included + single quote mark: single quotation mark + single quote: single quotation mark + start up: start + sunset: withdraw from service|withdraw from marketing|discontinue|no longer support + tar: compress|archive + tarball: tar file + thru: through + thumbstick: USB flash drive + tooling: tools + typo: typing error + uncompress: decompress + undeploy: remove|withdraw + unjar: extract + untar: extract + unzip: unzip + upward compatible: compatible with later versions + warning notice: attention notice + web-enable: enable for the web + webinar: webinar|webcast|web seminar|web-based event + wish: want + zero out: zero diff --git a/test/docs/IBMQuantum/UIConsistency.yml b/test/docs/IBMQuantum/UIConsistency.yml new file mode 100644 index 000000000..cc3cd179f 
--- /dev/null +++ b/test/docs/IBMQuantum/UIConsistency.yml @@ -0,0 +1,29 @@ +extends: substitution +message: Use '%s' rather than '%s' if writing about user interfaces. +link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=word-usage' +level: warning +ignorecase: true +action: + name: replace +# swap maps tokens in form of bad: good +swap: + '(?:Ctrl|control)-click': press Ctrl and click + blink: flash + breadcrumbing: breadcrumb trail + context menu: menu|pop-up menu + contextual help: help|context-sensitive help + depress: press|type + drag and drop: drag (verb) | drag-and-drop (adjective) + drill up: navigate + fill in: complete|enter|specify + launch: start|open + left-hand: left + mouse over: point to|move the mouse pointer over|Mouse|mouse over + right double-click: double right-click + right-hand: right + selection button: left mouse button + shift-click: press Shift and click + switch off: turn off | power off + switch on: turn on | power on + uncheck: clear + unselect: clear|deselect diff --git a/test/docs/IBMQuantum/Wordiness.yml b/test/docs/IBMQuantum/Wordiness.yml new file mode 100644 index 000000000..b5fa5d1e8 --- /dev/null +++ b/test/docs/IBMQuantum/Wordiness.yml @@ -0,0 +1,29 @@ +extends: substitution +message: Try '%s' rather than '%s'. 
+link: 'https://ibmdocs-test.dcs.ibm.com/docs/en/ibm-style?topic=word-usage' +level: warning +ignorecase: true +action: + name: replace +# swap maps tokens in form of bad: good +swap: + 'a lot(?: of)?': many|much + a number of: several + as long as: if|provided that + comprised of: consist of + hence: therefore + in order to: to + in other words: for example|that is + in spite of: regardless of|despite + in the event: in case|if|when + keep in mind: remember + leverage: use + on the fly: dynamically|as needed|in real time|immediately + on the other hand: however|alternatively|conversely + pain point: challenge|concern|difficulty|issue + prepend: add a prefix to + prior to: before + rule of thumb: rule + terminate: end|stop + thus: therefore + utilize: use diff --git a/test/docs/dictionary.txt b/test/docs/dictionary.txt index fd278a271..ca1359e39 100644 --- a/test/docs/dictionary.txt +++ b/test/docs/dictionary.txt @@ -124,6 +124,7 @@ Schmitt Schroeter Schuld Schwarz +SciPy Shadbolt Shen Shende @@ -157,6 +158,7 @@ Vion Vivek Vojtech Volkoff +Watrous Weedbrook Wierichs Woerner @@ -251,3 +253,4 @@ transpiling tridiagonal trits unitaries +untrusted From ccd41a529f065fd0c50087a8238f44c986bfbe7c Mon Sep 17 00:00:00 2001 From: Rebecca Dimock <66339736+beckykd@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:14:23 -0500 Subject: [PATCH 03/22] misc session updates (#1175) * misc session updates * fix SciPy * edit * only option 1 * fix link * fix links --------- Co-authored-by: Kevin Tian --- docs/how_to/error-suppression.rst | 31 ++++++++-------------- docs/how_to/run_session.rst | 22 +++++++++------- docs/migrate/migrate-tuning.rst | 19 +++++++------- docs/sessions.rst | 43 ++++++++++++++++++++++++++++--- 4 files changed, 72 insertions(+), 43 deletions(-) diff --git a/docs/how_to/error-suppression.rst b/docs/how_to/error-suppression.rst index 04132cad1..71cb45834 100644 --- a/docs/how_to/error-suppression.rst +++ b/docs/how_to/error-suppression.rst @@ -8,10 +8,13 @@ 
Error suppression typically results in some classical pre-processing overhead to Primitives let you employ error suppression techniques by setting the optimization level (``optimization_level`` option) and by choosing advanced transpilation options. Setting the optimization level ------------------------------- +------------------------------- The ``optimization_level`` setting specifies how much optimization to perform on the circuits. Higher levels generate more optimized circuits, at the expense of longer transpilation times. +.. note:: +    When using primitives, optimization levels 2 and 3 behave like level 1. + +--------------------+---------------------------------------------------------------------------------------------------+ | Optimization Level | Estimator & Sampler | +====================+===================================================================================================+ @@ -22,7 +25,7 @@ The ``optimization_level`` setting specifies how much optimization to perform on | | - routing (stochastic swaps) | | | | +--------------------+---------------------------------------------------------------------------------------------------+ -| 1 | Light optimization: | +| 1, 2, 3 | Light optimization: | | | | | | - Layout (trivial → vf2 → SabreLayout if routing is required) | | | - routing (SabreSWAPs if needed) | @@ -30,21 +33,9 @@ The ``optimization_level`` setting specifies how much optimization to perform on | | - Error Suppression: Dynamical Decoupling | | | | +--------------------+---------------------------------------------------------------------------------------------------+ -| 2 | Medium optimization: | -| | | -| | - Layout/Routing: Optimization level 1 (without trivial) + heuristic optimized with greater | -| | search depth and trials of optimization function | -| | - commutative cancellation | -| | - Error Suppression: Dynamical Decoupling | -| | |
-+--------------------+---------------------------------------------------------------------------------------------------+ -| 3 (default) | High Optimization: | -| | | -| | * Optimization level 2 + heuristic optimized on layout/routing further with greater effort/trials | -| | * 2 qubit KAK optimization | -| | * Error Suppression: Dynamical Decoupling | -| | | -+--------------------+---------------------------------------------------------------------------------------------------+ + +.. note:: +    If you want to use more advanced optimization, use the Qiskit transpiler locally and then pass the transpiled circuits to the primitives. For instructions see the `Submitting user-transpiled circuits using primitives `__ tutorial. Example: configure Estimator with optimization levels ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -56,7 +47,7 @@ Example: configure Estimator with optimization levels from qiskit.quantum_info import SparsePauliOp service = QiskitRuntimeService() - options = Options(optimization_level=2) + options = Options(optimization_level=1) psi = RealAmplitudes(num_qubits=2, reps=2) H = SparsePauliOp.from_list([("II", 1), ("IZ", 2), ("XI", 3)]) @@ -68,7 +59,7 @@ Example: configure Estimator with optimization levels psi1_H1 = job.result() .. note:: - If optimization level is not specified, the service uses ``optimization_level = 3``. + If optimization level is not specified, the service uses ``optimization_level = 1``.
Example: configure Sampler with optimization levels ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -78,7 +69,7 @@ Example: configure Sampler with optimization levels from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Options service = QiskitRuntimeService() - options = Options(optimization_level=3) + options = Options(optimization_level=1) with Session(service=service, backend="ibmq_qasm_simulator") as session: sampler = Sampler(session=session, options=options) diff --git a/docs/how_to/run_session.rst b/docs/how_to/run_session.rst index 1339e983a..9511f74b0 100644 --- a/docs/how_to/run_session.rst +++ b/docs/how_to/run_session.rst @@ -20,7 +20,7 @@ Open a session You can open a runtime session by using the context manager `with Session(…)` or by initializing the `Session` class. When you start a session, you can specify options, such as the backend to run on. This topic describes the most commonly used options. For the full list, see the `Sessions API documentation `__. .. important:: - Data from the first session job is cached and used by subsequent jobs. Therefore, if the first job is cancelled, subsequent session jobs will all fail. + If the first session job is canceled, subsequent session jobs will all fail. **Session class** @@ -50,21 +50,23 @@ When you start a session, you can specify session options, such as the backend t There are two ways to specify a backend in a session: -**Directly specify a string with the backend name.** Example: +**Directly specify a string with the backend name.** - .. code-block:: python +Example: + +.. code-block:: python - backend = "ibmq_qasm_simulator" - with Session(backend=backend): - ... + service = QiskitRuntimeService() + with Session(service=service, backend="ibmq_qasm_simulator"): + ... **Pass the backend object.** Example: - .. code-block:: python +.. code-block:: python - backend = service.get_backend("ibmq_qasm_simulator") - with Session(backend=backend): - ... 
+ backend = service.get_backend("ibmq_qasm_simulator") + with Session(backend=backend): + ... .. _session_length: diff --git a/docs/migrate/migrate-tuning.rst b/docs/migrate/migrate-tuning.rst index 9f29966c5..40527d8e0 100644 --- a/docs/migrate/migrate-tuning.rst +++ b/docs/migrate/migrate-tuning.rst @@ -55,23 +55,24 @@ For more information about the primitive options, refer to the 2. Transpilation ~~~~~~~~~~~~~~~~ -By default, the Qiskit Runtime primitives perform circuit transpilation. There are several optimization -levels you can choose from. These levels affect the transpilation strategy and might include additional error -suppression mechanisms. Level 0 only involves basic transpilation. +By default, the Qiskit Runtime primitives perform circuit transpilation. The optimization level you choose affects the transpilation strategy and might include additional error suppression mechanisms. Level 0 only involves basic transpilation. To learn about each optimization level, view the Optimization level table in the `Error suppression topic `__. +.. note:: + When using primitives, optimization levels 2 and 3 behave like level 1. If you want to use more advanced optimization, use the Qiskit transpiler locally and then pass the transpiled circuits to the primitives. For instructions see the `Submitting user-transpiled circuits using primitives `__ tutorial. + The optimization level option is a "first level option", and can be set as follows: .. code-block:: python from qiskit_ibm_runtime import Estimator, Options - options = Options(optimization_level=2) + options = Options(optimization_level=1) # or.. options = Options() - options.optimization_level = 2 + options.optimization_level = 1 estimator = Estimator(session=session, options=options) @@ -92,12 +93,10 @@ options you can set up. 
These are "second level options", and can be set as foll For more information, and a complete list of advanced transpilation options, see the Advanced transpilation options table in the `Error suppression topic `__. -Finally, you might want to specify settings that are not available through the primitives interface, -or use custom transpiler passes. In these cases, you can set ``skip_transpilation=True`` to submit -user-transpiled circuits. To learn how this is done, refer to the +To specify settings that are not available through the primitives interface or use custom transpiler passes, set ``skip_transpilation=True`` to submit user-transpiled circuits. This is described in the `Submitting user-transpiled circuits using primitives tutorial `_. -The ``skip_transpilation`` option is an advanced transpilation option, set as follows: +The ``skip_transpilation`` option is an advanced transpilation option, and is set as follows: .. code-block:: python from qiskit_ibm_runtime import Estimator, Options @@ -123,7 +122,7 @@ The configuration is similar to the other options: from qiskit_ibm_runtime import Estimator, Options - options = Options(resilience_level = 2) + options = Options(resilience_level = 2) # or... diff --git a/docs/sessions.rst b/docs/sessions.rst index ba28143c8..3a986d077 100644 --- a/docs/sessions.rst +++ b/docs/sessions.rst @@ -19,7 +19,7 @@ There are several benefits to using sessions: .. note:: * The queuing time does not decrease for the first job submitted within a session. Therefore, a session does not provide any benefits if you only need to run a single job. - * Since data from the first session job is cached and used by subsequent jobs, if the first job is cancelled, subsequent session jobs will all fail. + * If the first session job is cancelled, subsequent session jobs will all fail. * When using sessions, the uncertainty around queuing time is significantly reduced. This allows better estimation of a workload's total runtime and better resource management.
* In a device characterization context, being able to run experiments closely together helps prevent device drifts and provide more accurate results. @@ -117,7 +117,7 @@ Iterative Any session job submitted within the five-minute interactive timeout, also known as interactive time to live (ITTL), is processed immediately. This allows some time for variational algorithms, such as VQE, to perform classical post-processing. -- The quantum device is locked to the session user unless the TTL is reached. +- When a session is active, its jobs get priority until ITTL or max timeout is reached. - Post-processing could be done anywhere, such as a personal computer, cloud service, or an HPC environment. .. image:: images/iterative.png @@ -125,19 +125,56 @@ Any session job submitted within the five-minute interactive timeout, also known .. note:: There might be a limit imposed on the ITTL value depending on whether your hub is Premium, Open, and so on. +This is an example of running an iterative workload that uses the classical SciPy optimizer to minimize a cost function. In this model, SciPy uses the output of the cost function to calculate its next input. + +.. code-block:: python + + def cost_func(params, ansatz, hamiltonian, estimator): + # Return estimate of energy from estimator + + energy = estimator.run(ansatz, hamiltonian, parameter_values=params).result().values[0] + return energy + + x0 = 2 * np.pi * np.random.random(num_params) + + session = Session(backend=backend) + + estimator = Estimator(session=session, options={"shots": int(1e4)}) + res = minimize(cost_func, x0, args=(ansatz, hamiltonian, estimator), method="cobyla") + + # Close the session because we didn't use a context manager. + session.close() + + Batch +++++++++++++++++++++ Ideal for running experiments closely together to avoid device drifts, that is, to maintain device characterization. - Suitable for batching many jobs together. 
-- Jobs that fit within the maximum session time run back-to-back on hardware. +- The classical computation, such as compilation, of the jobs is run in parallel. This means running multiple jobs in a batch would be significantly faster than running them serially. + .. note:: When batching, jobs are not guaranteed to run in the order they are submitted. .. image:: images/batch.png +The following example shows how you can divide up a long list of circuits into multiple jobs and run them as a batch to take advantage of the parallel processing. + +.. code-block:: python + + backend = service.backend("ibm_sherbrooke") + + with Session(backend=backend): + estimator = Estimator() + start_idx = 0 + jobs = [] + while start_idx < len(circuits): + end_idx = start_idx + backend.max_circuits + jobs.append(estimator.run(circuits[start_idx:end_idx], obs[start_idx:end_idx], params[start_idx:end_idx])) + start_idx = end_idx + Sessions and reservations ------------------------- From 33981d4fbb00b09378db20781e9e1c2c1dc942b1 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Tue, 31 Oct 2023 17:10:12 -0400 Subject: [PATCH 04/22] Fail unit tests on deprecation warning (#1182) --- test/ibm_test_case.py | 3 +++ test/unit/test_data_serialization.py | 23 ++++++++--------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/test/ibm_test_case.py b/test/ibm_test_case.py index 2878597d4..ac91782a6 100644 --- a/test/ibm_test_case.py +++ b/test/ibm_test_case.py @@ -17,6 +17,7 @@ import logging import inspect import unittest +import warnings from contextlib import suppress from collections import defaultdict from typing import DefaultDict, Dict @@ -45,6 +46,8 @@ def setUpClass(cls): filename = "%s.log" % os.path.splitext(inspect.getfile(cls))[0] setup_test_logging(cls.log, filename) cls._set_logging_level(logging.getLogger(QISKIT_IBM_RUNTIME_LOGGER_NAME)) + # fail test on deprecation warnings from qiskit + warnings.filterwarnings("error", category=DeprecationWarning, 
module="qiskit") @classmethod def _set_logging_level(cls, logger: logging.Logger) -> None: diff --git a/test/unit/test_data_serialization.py b/test/unit/test_data_serialization.py index 604b78cb1..91e6ea8da 100644 --- a/test/unit/test_data_serialization.py +++ b/test/unit/test_data_serialization.py @@ -96,15 +96,10 @@ def test_coder_operators(self): # filter warnings triggered by opflow imports with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", category=DeprecationWarning, module=r"qiskit\.opflow\." - ) + warnings.filterwarnings("ignore", category=DeprecationWarning) from qiskit.opflow import PauliSumOp # pylint: disable=import-outside-toplevel - # catch warnings triggered by opflow use - with warnings.catch_warnings(record=True) as w_log: deprecated_op = PauliSumOp(SparsePauliOp(Pauli("XYZX"), coeffs=[2])) - self.assertTrue(len(w_log) > 0) coeff_x = Parameter("x") coeff_y = coeff_x + 1 @@ -124,14 +119,7 @@ def test_coder_operators(self): with warnings.catch_warnings(): # filter warnings triggered by opflow imports # in L146 of utils/json.py - warnings.filterwarnings( - "ignore", category=DeprecationWarning, module=r"qiskit\.opflow\." 
- ) - warnings.filterwarnings( - "ignore", - category=DeprecationWarning, - module=r"qiskit_ibm_runtime\.utils\.json", - ) + warnings.filterwarnings("ignore", category=DeprecationWarning) decoded = json.loads(encoded, cls=RuntimeDecoder) self.assertEqual(operator, decoded) @@ -141,7 +129,12 @@ def test_coder_noise_model(self): self.assertIsInstance(noise_model, NoiseModel) encoded = json.dumps(noise_model, cls=RuntimeEncoder) self.assertIsInstance(encoded, str) - decoded = json.loads(encoded, cls=RuntimeDecoder) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + ) + decoded = json.loads(encoded, cls=RuntimeDecoder) self.assertIsInstance(decoded, NoiseModel) self.assertEqual(noise_model.noise_qubits, decoded.noise_qubits) self.assertEqual(noise_model.noise_instructions, decoded.noise_instructions) From f9b4c2c33569c34b86754322fb40020bc4f073b3 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Tue, 31 Oct 2023 18:30:18 -0400 Subject: [PATCH 05/22] Update qiskit-ibm-provider version 0.7.2 (#1186) --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3d318464e..3697f71fd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,4 @@ python-dateutil>=2.8.0 websocket-client>=1.5.1 typing-extensions>=4.0.0 ibm-platform-services>=0.22.6 -qiskit-ibm-provider>=0.7.0 +qiskit-ibm-provider>=0.7.2 diff --git a/setup.py b/setup.py index e89854899..cd35c8e70 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ "python-dateutil>=2.8.0", "websocket-client>=1.5.1", "ibm-platform-services>=0.22.6", - "qiskit-ibm-provider>=0.7.0", + "qiskit-ibm-provider>=0.7.2", ] # Handle version. 
From c15763ad655d9deb02650909c672b2dafd3443c2 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Thu, 2 Nov 2023 09:59:02 -0400 Subject: [PATCH 06/22] Add deprecation warnings for everything related to custom programs (#1153) * add deprecation warnings for custom programs * add msg in run() * fix lint * remove test_programs.py * update deprecation message * add reno --- qiskit_ibm_runtime/qiskit_runtime_service.py | 97 +++++++ ...cate-custom-programs-274e6ea20d1027a1.yaml | 9 + test/unit/test_programs.py | 249 ------------------ 3 files changed, 106 insertions(+), 249 deletions(-) create mode 100644 releasenotes/notes/deprecate-custom-programs-274e6ea20d1027a1.yaml delete mode 100644 test/unit/test_programs.py diff --git a/qiskit_ibm_runtime/qiskit_runtime_service.py b/qiskit_ibm_runtime/qiskit_runtime_service.py index b6799de2a..c2a14297e 100644 --- a/qiskit_ibm_runtime/qiskit_runtime_service.py +++ b/qiskit_ibm_runtime/qiskit_runtime_service.py @@ -812,6 +812,18 @@ def pprint_programs( value of 20. skip: The number of programs to skip. """ + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) programs = self.programs(refresh, limit, skip) for prog in programs: print("=" * 50) @@ -840,6 +852,18 @@ def programs( Returns: A list of runtime programs. """ + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. 
Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) if skip is None: skip = 0 if not self._programs or refresh: @@ -884,6 +908,18 @@ def program(self, program_id: str, refresh: bool = False) -> RuntimeProgram: RuntimeProgramNotFound: If the program does not exist. IBMRuntimeError: If the request failed. """ + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) if program_id not in self._programs or refresh: try: response = self._api_client.program_get(program_id) @@ -971,6 +1007,19 @@ def run( RuntimeProgramNotFound: If the program cannot be found. IBMRuntimeError: An error occurred running the program. """ + if program_id not in ["sampler", "estimator", "circuit-runner", "qasm3-runner"]: + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. 
Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) qrt_options: RuntimeOptions = options if options is None: qrt_options = RuntimeOptions() @@ -1093,6 +1142,18 @@ def upload_program(self, data: str, metadata: Optional[Union[Dict, str]] = None) IBMNotAuthorizedError: If you are not authorized to upload programs. IBMRuntimeError: If the upload failed. """ + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) program_metadata = self._read_metadata(metadata=metadata) for req in ["name", "max_execution_time"]: @@ -1176,6 +1237,18 @@ def update_program( RuntimeProgramNotFound: If the program doesn't exist. IBMRuntimeError: If the request failed. """ + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) if not any([data, metadata, name, description, max_execution_time, spec]): warnings.warn( "None of the 'data', 'metadata', 'name', 'description', " @@ -1240,6 +1313,18 @@ def delete_program(self, program_id: str) -> None: RuntimeProgramNotFound: If the program doesn't exist. 
IBMRuntimeError: If the request failed. """ + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) try: self._api_client.program_delete(program_id=program_id) except RequestsApiError as ex: @@ -1262,6 +1347,18 @@ def set_program_visibility(self, program_id: str, public: bool) -> None: RuntimeProgramNotFound: if program not found (404) IBMRuntimeError: if update failed (401, 403) """ + warnings.warn( + ( + "Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will " + "be removed on November 27, 2023. You can instead convert your custom programs " + "to use Qiskit Runtime primitives with Quantum Serverless. Refer to the migration " + "guide for instructions: " + "https://qiskit-extensions.github.io/quantum-serverless/migration" + "/migration_from_qiskit_runtime_programs.html" + ), + DeprecationWarning, + stacklevel=2, + ) try: self._api_client.set_program_visibility(program_id, public) except RequestsApiError as ex: diff --git a/releasenotes/notes/deprecate-custom-programs-274e6ea20d1027a1.yaml b/releasenotes/notes/deprecate-custom-programs-274e6ea20d1027a1.yaml new file mode 100644 index 000000000..dd8ec9d4e --- /dev/null +++ b/releasenotes/notes/deprecate-custom-programs-274e6ea20d1027a1.yaml @@ -0,0 +1,9 @@ +--- +deprecations: + - | + Custom programs are being deprecated as of qiskit-ibm-runtime 0.14.0 and will be + removed on November 27, 2023. Users can instead convert their custom programs to use + Qiskit Runtime primitives with Quantum Serverless. 
Refer to the migration guide for + instructions: + https://qiskit-extensions.github.io/quantum-serverless/migration/migration_from_qiskit_runtime_programs.html + diff --git a/test/unit/test_programs.py b/test/unit/test_programs.py deleted file mode 100644 index f58e08393..000000000 --- a/test/unit/test_programs.py +++ /dev/null @@ -1,249 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2021. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Tests for program related runtime functions.""" - -import copy -import json -import os -import tempfile -import warnings -from io import StringIO -from unittest.mock import patch - -from qiskit_ibm_runtime.exceptions import IBMInputValueError -from qiskit_ibm_runtime.exceptions import RuntimeProgramNotFound -from qiskit_ibm_runtime.runtime_program import ParameterNamespace -from ..ibm_test_case import IBMTestCase -from ..decorators import run_quantum_and_cloud_fake -from ..program import upload_program, DEFAULT_DATA, DEFAULT_METADATA - - -class TestPrograms(IBMTestCase): - """Class for testing runtime modules.""" - - @run_quantum_and_cloud_fake - def test_list_programs(self, service): - """Test listing programs.""" - program_id = upload_program(service) - programs = service.programs() - all_ids = [prog.program_id for prog in programs] - self.assertIn(program_id, all_ids) - - @run_quantum_and_cloud_fake - def test_list_programs_with_limit_skip(self, service): - """Test listing programs with limit and skip.""" - program_ids = [] - for _ in range(3): - program_ids.append(upload_program(service)) - programs = service.programs(limit=2, skip=1) 
- all_ids = [prog.program_id for prog in programs] - self.assertNotIn(program_ids[0], all_ids) - self.assertIn(program_ids[1], all_ids) - self.assertIn(program_ids[2], all_ids) - programs = service.programs(limit=3) - all_ids = [prog.program_id for prog in programs] - self.assertIn(program_ids[0], all_ids) - - @run_quantum_and_cloud_fake - def test_list_program(self, service): - """Test listing a single program.""" - program_id = upload_program(service) - program = service.program(program_id) - self.assertEqual(program_id, program.program_id) - - @run_quantum_and_cloud_fake - def test_print_programs(self, service): - """Test printing programs.""" - ids = [] - for idx in range(3): - ids.append(upload_program(service, name=f"name_{idx}")) - - programs = service.programs() - with patch("sys.stdout", new=StringIO()) as mock_stdout: - service.pprint_programs() - stdout = mock_stdout.getvalue() - for prog in programs: - self.assertIn(prog.program_id, stdout) - self.assertIn(prog.name, stdout) - self.assertNotIn(str(prog.max_execution_time), stdout) - self.assertNotIn("Backend requirements", stdout) - service.pprint_programs(detailed=True) - stdout_detailed = mock_stdout.getvalue() - for prog in programs: - self.assertIn(prog.program_id, stdout_detailed) - self.assertIn(prog.name, stdout_detailed) - self.assertIn(str(prog.max_execution_time), stdout_detailed) - self.assertIn("Backend requirements", stdout_detailed) - - @run_quantum_and_cloud_fake - def test_upload_program(self, service): - """Test uploading a program.""" - max_execution_time = 3000 - is_public = True - program_id = upload_program( - service=service, max_execution_time=max_execution_time, is_public=is_public - ) - self.assertTrue(program_id) - program = service.program(program_id) - self.assertTrue(program) - self.assertEqual(max_execution_time, program.max_execution_time) - self.assertEqual(program.is_public, is_public) - - @run_quantum_and_cloud_fake - def test_update_program(self, service): - """Test 
updating program.""" - new_data = "def main() {foo=bar}" - new_metadata = copy.deepcopy(DEFAULT_METADATA) - new_metadata["name"] = "test_update_program" - new_name = "name2" - new_description = "some other description" - new_cost = DEFAULT_METADATA["max_execution_time"] + 100 - new_spec = copy.deepcopy(DEFAULT_METADATA["spec"]) - new_spec["backend_requirements"] = {"input_allowed": "runtime"} - - sub_tests = [ - {"data": new_data}, - {"metadata": new_metadata}, - {"data": new_data, "metadata": new_metadata}, - {"metadata": new_metadata, "name": new_name}, - { - "data": new_data, - "metadata": new_metadata, - "description": new_description, - }, - {"max_execution_time": new_cost, "spec": new_spec}, - ] - - for new_vals in sub_tests: - with self.subTest(new_vals=new_vals.keys()): - program_id = upload_program(service) - service.update_program(program_id=program_id, **new_vals) - updated = service.program(program_id, refresh=True) - if "data" in new_vals: - raw_program = service._api_client.program_get(program_id) - self.assertEqual(new_data, raw_program["data"]) - if "metadata" in new_vals and "name" not in new_vals: - self.assertEqual(new_metadata["name"], updated.name) - if "name" in new_vals: - self.assertEqual(new_name, updated.name) - if "description" in new_vals: - self.assertEqual(new_description, updated.description) - if "max_execution_time" in new_vals: - self.assertEqual(new_cost, updated.max_execution_time) - if "spec" in new_vals: - raw_program = service._api_client.program_get(program_id) - self.assertEqual(new_spec, raw_program["spec"]) - - @run_quantum_and_cloud_fake - def test_update_program_no_new_fields(self, service): - """Test updating a program without any new data.""" - program_id = upload_program(service) - with warnings.catch_warnings(record=True) as warn_cm: - service.update_program(program_id=program_id) - self.assertEqual(len(warn_cm), 1) - - @run_quantum_and_cloud_fake - def test_update_phantom_program(self, service): - """Test updating a 
phantom program.""" - with self.assertRaises(RuntimeProgramNotFound): - service.update_program("phantom_program", name="foo") - - @run_quantum_and_cloud_fake - def test_delete_program(self, service): - """Test deleting program.""" - program_id = upload_program(service) - service.delete_program(program_id) - with self.assertRaises(RuntimeProgramNotFound): - service.program(program_id, refresh=True) - - @run_quantum_and_cloud_fake - def test_double_delete_program(self, service): - """Test deleting a deleted program.""" - program_id = upload_program(service) - service.delete_program(program_id) - with self.assertRaises(RuntimeProgramNotFound): - service.delete_program(program_id) - - @run_quantum_and_cloud_fake - def test_retrieve_program_data(self, service): - """Test retrieving program data""" - program_id = upload_program(service, name="qiskit-test") - service.programs() - program = service.program(program_id) - self.assertEqual(program.data, DEFAULT_DATA) - self._validate_program(program) - - @run_quantum_and_cloud_fake - def test_program_params_validation(self, service): - """Test program parameters validation process""" - program_id = upload_program(service) - program = service.program(program_id) - params: ParameterNamespace = program.parameters() - params.param1 = "Hello, World" - # Check OK params - params.validate() - # Check OK params - contains unnecessary param - params.param3 = "Hello, World" - params.validate() - # Check bad params - missing required param - params.param1 = None - with self.assertRaises(IBMInputValueError): - params.validate() - params.param1 = "foo" - - @run_quantum_and_cloud_fake - def test_program_metadata(self, service): - """Test program metadata.""" - temp_fp = tempfile.NamedTemporaryFile(mode="w+", delete=False) - json.dump(DEFAULT_METADATA, temp_fp) - temp_fp.close() - - sub_tests = [temp_fp.name, DEFAULT_METADATA] - try: - for metadata in sub_tests: - with self.subTest(metadata_type=type(metadata)): - program_id = 
service.upload_program(data=DEFAULT_DATA, metadata=metadata) - program = service.program(program_id) - service.delete_program(program_id) - self._validate_program(program) - finally: - os.remove(temp_fp.name) - - @run_quantum_and_cloud_fake - def test_set_program_visibility(self, service): - """Test setting program visibility.""" - program_id = upload_program(service, is_public=False) - service.set_program_visibility(program_id, True) - program = service.program(program_id) - self.assertTrue(program.is_public) - - @run_quantum_and_cloud_fake - def test_set_program_visibility_phantom_program(self, service): - """Test setting program visibility for a phantom program.""" - with self.assertRaises(RuntimeProgramNotFound): - service.set_program_visibility("foo", True) - - def _validate_program(self, program): - """Validate a program.""" - self.assertEqual(DEFAULT_METADATA["name"], program.name) - self.assertEqual(DEFAULT_METADATA["description"], program.description) - self.assertEqual(DEFAULT_METADATA["max_execution_time"], program.max_execution_time) - self.assertTrue(program.creation_date) - self.assertTrue(program.update_date) - self.assertEqual( - DEFAULT_METADATA["spec"]["backend_requirements"], - program.backend_requirements, - ) - self.assertEqual(DEFAULT_METADATA["spec"]["parameters"], program.parameters().metadata) - self.assertEqual(DEFAULT_METADATA["spec"]["return_values"], program.return_values) - self.assertEqual(DEFAULT_METADATA["spec"]["interim_results"], program.interim_results) From a8dc25d6b41cca91484343558de3629dc018fdb8 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Thu, 2 Nov 2023 11:50:35 -0400 Subject: [PATCH 07/22] Create batch class (#1183) * Create batch class * add simple reno and test --- qiskit_ibm_runtime/__init__.py | 1 + qiskit_ibm_runtime/batch.py | 21 +++++++++++ .../notes/batch-class-b7a3befcfce8860e.yaml | 6 ++++ test/unit/test_batch.py | 35 +++++++++++++++++++ 4 files changed, 63 insertions(+) create mode 100644 
qiskit_ibm_runtime/batch.py create mode 100644 releasenotes/notes/batch-class-b7a3befcfce8860e.yaml create mode 100644 test/unit/test_batch.py diff --git a/qiskit_ibm_runtime/__init__.py b/qiskit_ibm_runtime/__init__.py index 07096f3eb..bb3963f49 100644 --- a/qiskit_ibm_runtime/__init__.py +++ b/qiskit_ibm_runtime/__init__.py @@ -181,6 +181,7 @@ def result_callback(job_id, result): from .runtime_options import RuntimeOptions from .utils.json import RuntimeEncoder, RuntimeDecoder from .session import Session # pylint: disable=cyclic-import +from .batch import Batch # pylint: disable=cyclic-import from .exceptions import * from .utils.utils import setup_logger diff --git a/qiskit_ibm_runtime/batch.py b/qiskit_ibm_runtime/batch.py new file mode 100644 index 000000000..6947ddc81 --- /dev/null +++ b/qiskit_ibm_runtime/batch.py @@ -0,0 +1,21 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Qiskit Runtime batch mode.""" + +from .session import Session + + +class Batch(Session): + """Class for creating a batch mode in Qiskit Runtime.""" + + pass diff --git a/releasenotes/notes/batch-class-b7a3befcfce8860e.yaml b/releasenotes/notes/batch-class-b7a3befcfce8860e.yaml new file mode 100644 index 000000000..8e2bf7e06 --- /dev/null +++ b/releasenotes/notes/batch-class-b7a3befcfce8860e.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + There is a new class, :class:`qiskit_ibm_runtime.Batch` that currently works + the same way as :class:`qiskit_ibm_runtime.Session` but will later be updated + to better support submitting multiple jobs at once. 
diff --git a/test/unit/test_batch.py b/test/unit/test_batch.py new file mode 100644 index 000000000..a27eca479 --- /dev/null +++ b/test/unit/test_batch.py @@ -0,0 +1,35 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Tests for Batch class.""" + +from unittest.mock import patch + +from qiskit_ibm_runtime import Batch +import qiskit_ibm_runtime.session as session_pkg +from ..ibm_test_case import IBMTestCase + + +class TestBatch(IBMTestCase): + """Class for testing the Session class.""" + + def tearDown(self) -> None: + super().tearDown() + session_pkg._DEFAULT_SESSION.set(None) + + @patch("qiskit_ibm_runtime.session.QiskitRuntimeService", autospec=True) + def test_default_batch(self, mock_service): + """Test using default batch mode.""" + mock_service.global_service = None + batch = Batch(backend="ibm_gotham") + self.assertIsNotNone(batch.service) + mock_service.assert_called_once() From 9c443f9c0fe12ace0ad10f128fd51f356f2ae0bb Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Thu, 2 Nov 2023 13:19:03 -0400 Subject: [PATCH 08/22] Prepare release 0.14.0 (#1188) --- docs/conf.py | 2 +- docs/index.rst | 2 +- qiskit_ibm_runtime/VERSION.txt | 2 +- releasenotes/notes/{ => 0.14}/batch-class-b7a3befcfce8860e.yaml | 0 .../{ => 0.14}/deprecate-custom-programs-274e6ea20d1027a1.yaml | 0 .../{ => 0.14}/remove_kwargs_options-9024d3ec6572a53e.yaml | 0 6 files changed, 3 insertions(+), 3 deletions(-) rename releasenotes/notes/{ => 0.14}/batch-class-b7a3befcfce8860e.yaml (100%) rename releasenotes/notes/{ => 
0.14}/deprecate-custom-programs-274e6ea20d1027a1.yaml (100%) rename releasenotes/notes/{ => 0.14}/remove_kwargs_options-9024d3ec6572a53e.yaml (100%) diff --git a/docs/conf.py b/docs/conf.py index 9c89d3c49..8d249c408 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -42,7 +42,7 @@ # The short X.Y version version = '' # The full version, including alpha/beta/rc tags -release = '0.13.1' +release = '0.14.0' docs_url_prefix = "ecosystem/ibm-runtime" diff --git a/docs/index.rst b/docs/index.rst index f1a57983e..43060ade5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,5 +1,5 @@ ######################################### -Qiskit Runtime 0.13.0 documentation +Qiskit Runtime 0.14.0 documentation ######################################### Overview diff --git a/qiskit_ibm_runtime/VERSION.txt b/qiskit_ibm_runtime/VERSION.txt index c317a9189..a803cc227 100644 --- a/qiskit_ibm_runtime/VERSION.txt +++ b/qiskit_ibm_runtime/VERSION.txt @@ -1 +1 @@ -0.13.1 +0.14.0 diff --git a/releasenotes/notes/batch-class-b7a3befcfce8860e.yaml b/releasenotes/notes/0.14/batch-class-b7a3befcfce8860e.yaml similarity index 100% rename from releasenotes/notes/batch-class-b7a3befcfce8860e.yaml rename to releasenotes/notes/0.14/batch-class-b7a3befcfce8860e.yaml diff --git a/releasenotes/notes/deprecate-custom-programs-274e6ea20d1027a1.yaml b/releasenotes/notes/0.14/deprecate-custom-programs-274e6ea20d1027a1.yaml similarity index 100% rename from releasenotes/notes/deprecate-custom-programs-274e6ea20d1027a1.yaml rename to releasenotes/notes/0.14/deprecate-custom-programs-274e6ea20d1027a1.yaml diff --git a/releasenotes/notes/remove_kwargs_options-9024d3ec6572a53e.yaml b/releasenotes/notes/0.14/remove_kwargs_options-9024d3ec6572a53e.yaml similarity index 100% rename from releasenotes/notes/remove_kwargs_options-9024d3ec6572a53e.yaml rename to releasenotes/notes/0.14/remove_kwargs_options-9024d3ec6572a53e.yaml From 1a5c215d2e9944fc3de92875473345abd00f1c64 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: 
Thu, 2 Nov 2023 13:37:58 -0400 Subject: [PATCH 09/22] update main branch 0.14.1 (#1189) --- docs/conf.py | 2 +- qiskit_ibm_runtime/VERSION.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 8d249c408..3363afd94 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -42,7 +42,7 @@ # The short X.Y version version = '' # The full version, including alpha/beta/rc tags -release = '0.14.0' +release = '0.14.1' docs_url_prefix = "ecosystem/ibm-runtime" diff --git a/qiskit_ibm_runtime/VERSION.txt b/qiskit_ibm_runtime/VERSION.txt index a803cc227..930e3000b 100644 --- a/qiskit_ibm_runtime/VERSION.txt +++ b/qiskit_ibm_runtime/VERSION.txt @@ -1 +1 @@ -0.14.0 +0.14.1 From f751ffc8b43b62d334f53942285a53e6b63fb84e Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Mon, 6 Nov 2023 10:26:26 -0500 Subject: [PATCH 10/22] Skip tests (#1197) --- test/unit/test_data_serialization.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/unit/test_data_serialization.py b/test/unit/test_data_serialization.py index 91e6ea8da..8011c6eff 100644 --- a/test/unit/test_data_serialization.py +++ b/test/unit/test_data_serialization.py @@ -17,6 +17,7 @@ import subprocess import tempfile import warnings +from unittest import skip from datetime import datetime import numpy as np @@ -91,6 +92,7 @@ def test_coder_qc(self): decoded = [decoded] self.assertTrue(all(isinstance(item, QuantumCircuit) for item in decoded)) + @skip("Skip until qiskit-ibm-provider/736 is merged") def test_coder_operators(self): """Test runtime encoder and decoder for operators.""" @@ -165,6 +167,7 @@ def test_encoder_ndarray(self): decoded = json.loads(encoded, cls=RuntimeDecoder) self.assertTrue(np.array_equal(decoded["ndarray"], obj["ndarray"])) + @skip("Skip until qiskit-ibm-provider/736 is merged") def test_encoder_instruction(self): """Test encoding and decoding instructions""" subtests = ( From d5e26abce2d8ab96c45098415a3ec9d3ce05f40e Mon Sep 17 00:00:00 2001 From: Arnau 
Casau <47946624+arnaucasau@users.noreply.github.com> Date: Tue, 7 Nov 2023 15:18:40 +0100 Subject: [PATCH 11/22] Fix templates documentation (#1162) ### Summary This PR fixes the problem of having duplicated methods when a class has inherited_members. In the original `class.rst`, we have two consecutive for, one for all methods, and another one for only the inherited_members. With this two for, we are duplicating the entries of the inherited methods (they appear two times in the page). --- docs/_templates/autosummary/class.rst | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/docs/_templates/autosummary/class.rst b/docs/_templates/autosummary/class.rst index 6f917320a..471c0f6c8 100644 --- a/docs/_templates/autosummary/class.rst +++ b/docs/_templates/autosummary/class.rst @@ -12,34 +12,25 @@ :no-inherited-members: :no-special-members: - {% block attributes_summary %} +{% block attributes_summary %} {% if attributes %} - .. rubric:: Attributes - - {% for item in all_attributes %} - {%- if not item.startswith('_') %} + {% for item in all_attributes %} + {%- if not item.startswith('_') %} .. autoattribute:: {{ name }}.{{ item }} - {%- endif -%} - {%- endfor %} + {%- endif -%} + {%- endfor %} {% endif %} - {% endblock %} +{% endblock %} - {% block methods_summary %} +{% block methods_summary %} {% if methods %} - .. rubric:: Methods - {% for item in all_methods %} {%- if not item.startswith('_') or item in ['__call__', '__mul__', '__getitem__', '__len__'] %} .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} - {% for item in inherited_members %} - {%- if item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - .. 
automethod:: {{ name }}.{{ item }} - {%- endif -%} - {%- endfor %} {% endif %} - {% endblock %} +{% endblock %} From 198974f446b4ac67bfe042c15d3d71dbe9333531 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Tue, 7 Nov 2023 11:27:05 -0500 Subject: [PATCH 12/22] Update backend serialization test (#1200) --- test/integration/test_backend_serialization.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/integration/test_backend_serialization.py b/test/integration/test_backend_serialization.py index 7415c757e..f17b9c494 100644 --- a/test/integration/test_backend_serialization.py +++ b/test/integration/test_backend_serialization.py @@ -17,14 +17,13 @@ import dateutil.parser from ..ibm_test_case import IBMIntegrationTestCase -from ..decorators import ( - run_integration_test, -) +from ..decorators import run_integration_test, production_only class TestSerialization(IBMIntegrationTestCase): """Test data serialization.""" + @production_only @run_integration_test def test_backend_configuration(self, service): """Test deserializing backend configuration.""" From 1fccd8e20aa4bb67f9ed3d60013ba1c69488ce16 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Wed, 8 Nov 2023 00:14:04 +0200 Subject: [PATCH 13/22] Support IBMBackend.run() (#1138) * initial support for backend.run() * Added temporary session support * Copied test_backend.py from the provider * Added all status types from the provider * Added test_ibm_job_states.py from provider. 
Added 'transpiler' directory to support convert_id_to_delay * lint * black * lint * Added integration tests from the provider * added test_ibm_job and made necessary changes * Fixed several tests * Fixed missing job methods in test * Changed exception type * Added test_ibm_job_attributes.py * Added test_ibm_job_attributes.py that was missed in previous commit * Added test class TestBackendRunInSession for backend.run with session * Cleaning up code * lint, added missing parameter * Added more tests from qiskit-ibm-provider * Inherit from BaseQiskitTestCase * Enabled several tests * removed method _deprecate_id_instruction * lint, unused imports * Removed instance parameter from tests with backend.run() * Removed instance from decorator * Changed test to run on quantum channel only * Removed instance parameter when getting backend * lint * Copied transpiler directory from the provider * black * fix more tests * update test_session * added tranpiler passes entry point * Removed obsolete JobStatus types, and removed the tests that were checking them * Removed unnecessary check * Removed exception parameter from validate_job_tags. Use 'import_job_tags' from runtime instead of from provider * Put back the check if circuit is indeed of type 'QuantumCircuit'. Updated the hint accordingly * Update qiskit_ibm_runtime/ibm_backend.py Co-authored-by: Jessie Yu * Cleaned up code involving session setup * Removed setting of 'skip_transpilation' because set by default by Qasm3 * Replaced in path 'qiskit-ibm-provider' with 'qiskit-ibm-runtime'. * Added None to get() statement * Changed warning to error when init_circuit is boolean * Fixed setting of start_session * Removed max_time parameter, because wasn't reaching the server. 
* Release note * address comment --------- Co-authored-by: Kevin Tian Co-authored-by: Jessie Yu --- qiskit_ibm_runtime/ibm_backend.py | 310 ++++++++- qiskit_ibm_runtime/qiskit_runtime_service.py | 3 + qiskit_ibm_runtime/runtime_job.py | 10 +- qiskit_ibm_runtime/runtime_options.py | 2 +- qiskit_ibm_runtime/transpiler/__init__.py | 31 + .../transpiler/passes/__init__.py | 36 + .../transpiler/passes/basis/__init__.py | 23 + .../passes/basis/convert_id_to_delay.py | 87 +++ .../transpiler/passes/scheduling/__init__.py | 397 +++++++++++ .../passes/scheduling/block_base_padder.py | 620 +++++++++++++++++ .../passes/scheduling/dynamical_decoupling.py | 553 +++++++++++++++ .../transpiler/passes/scheduling/pad_delay.py | 78 +++ .../transpiler/passes/scheduling/scheduler.py | 643 ++++++++++++++++++ .../transpiler/passes/scheduling/utils.py | 287 ++++++++ qiskit_ibm_runtime/transpiler/plugin.py | 98 +++ qiskit_ibm_runtime/utils/utils.py | 9 +- .../notes/backend_run-d5a92a4d677da6c1.yaml | 6 + setup.py | 6 + test/decorators.py | 47 ++ test/fake_account_client.py | 531 +++++++++++++++ test/ibm_test_case.py | 5 +- test/integration/test_backend.py | 88 ++- test/integration/test_ibm_job.py | 436 ++++++++++++ test/integration/test_ibm_job_attributes.py | 323 +++++++++ test/integration/test_ibm_qasm_simulator.py | 171 +++++ test/integration/test_session.py | 75 ++ test/unit/test_backend.py | 308 +++++++++ test/utils.py | 95 +++ 28 files changed, 5246 insertions(+), 32 deletions(-) create mode 100644 qiskit_ibm_runtime/transpiler/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py create mode 100644 
qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py create mode 100644 qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py create mode 100644 qiskit_ibm_runtime/transpiler/plugin.py create mode 100644 releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml create mode 100644 test/fake_account_client.py create mode 100644 test/integration/test_ibm_job.py create mode 100644 test/integration/test_ibm_job_attributes.py create mode 100644 test/integration/test_ibm_qasm_simulator.py create mode 100644 test/unit/test_backend.py diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index a3da57099..d70c81683 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -13,13 +13,16 @@ """Module for interfacing with an IBM Quantum Backend.""" import logging - -from typing import Iterable, Union, Optional, Any, List +from typing import Iterable, Union, Optional, Any, List, Dict from datetime import datetime as python_datetime from copy import deepcopy +from dataclasses import asdict +import warnings from qiskit import QuantumCircuit from qiskit.qobj.utils import MeasLevel, MeasReturnType +from qiskit.tools.events.pubsub import Publisher + from qiskit.providers.backend import BackendV2 as Backend from qiskit.providers.options import Options from qiskit.providers.models import ( @@ -42,11 +45,19 @@ defaults_from_server_data, properties_from_server_data, ) -from qiskit_ibm_provider.utils import local_to_utc +from qiskit_ibm_provider.utils import local_to_utc, are_circuits_dynamic +from qiskit_ibm_provider.utils.options import QASM2Options, QASM3Options +from qiskit_ibm_provider.exceptions import IBMBackendValueError, IBMBackendApiError +from qiskit_ibm_provider.api.exceptions import RequestsApiError -from qiskit_ibm_runtime import ( # pylint: 
disable=unused-import,cyclic-import - qiskit_runtime_service, -) +# temporary until we unite the 2 Session classes +from qiskit_ibm_provider.session import ( + Session as ProviderSession, +) # temporary until we unite the 2 Session classes + +from .utils.utils import validate_job_tags +from . import qiskit_runtime_service # pylint: disable=unused-import,cyclic-import +from .runtime_job import RuntimeJob from .api.clients import RuntimeClient from .api.clients.backend import BaseBackendClient @@ -57,6 +68,9 @@ logger = logging.getLogger(__name__) +QOBJRUNNERPROGRAMID = "circuit-runner" +QASM3RUNNERPROGRAMID = "qasm3-runner" + class IBMBackend(Backend): """Backend class interfacing with an IBM Quantum backend. @@ -180,6 +194,7 @@ def __init__( self._defaults = None self._target = None self._max_circuits = configuration.max_experiments + self._session: ProviderSession = None if ( not self._configuration.simulator and hasattr(self.options, "noise_model") @@ -492,10 +507,25 @@ def __call__(self) -> "IBMBackend": # For backward compatibility only, can be removed later. return self - def run(self, *args: Any, **kwargs: Any) -> None: - """Not supported method""" - # pylint: disable=arguments-differ - raise RuntimeError("IBMBackend.run() is not supported in the Qiskit Runtime environment.") + def _check_circuits_attributes(self, circuits: List[Union[QuantumCircuit, str]]) -> None: + """Check that circuits can be executed on backend. + Raises: + IBMBackendValueError: + - If one of the circuits contains more qubits than on the backend.""" + + if len(circuits) > self._max_circuits: + raise IBMBackendValueError( + f"Number of circuits, {len(circuits)} exceeds the " + f"maximum for this backend, {self._max_circuits})" + ) + for circ in circuits: + if isinstance(circ, QuantumCircuit): + if circ.num_qubits > self._configuration.num_qubits: + raise IBMBackendValueError( + f"Circuit contains {circ.num_qubits} qubits, " + f"but backend has only {self.num_qubits}." 
+ ) + self.check_faulty(circ) def check_faulty(self, circuit: QuantumCircuit) -> None: """Check if the input circuit uses faulty qubits or edges. @@ -549,6 +579,266 @@ def __deepcopy__(self, _memo: dict = None) -> "IBMBackend": cpy._options = deepcopy(self._options, _memo) return cpy + def run( + self, + circuits: Union[QuantumCircuit, str, List[Union[QuantumCircuit, str]]], + dynamic: bool = None, + job_tags: Optional[List[str]] = None, + init_circuit: Optional[QuantumCircuit] = None, + init_num_resets: Optional[int] = None, + header: Optional[Dict] = None, + shots: Optional[Union[int, float]] = None, + memory: Optional[bool] = None, + meas_level: Optional[Union[int, MeasLevel]] = None, + meas_return: Optional[Union[str, MeasReturnType]] = None, + rep_delay: Optional[float] = None, + init_qubits: Optional[bool] = None, + use_measure_esp: Optional[bool] = None, + noise_model: Optional[Any] = None, + seed_simulator: Optional[int] = None, + **run_config: Dict, + ) -> RuntimeJob: + """Run on the backend. + If a keyword specified here is also present in the ``options`` attribute/object, + the value specified here will be used for this run. + + Args: + circuits: An individual or a + list of :class:`~qiskit.circuits.QuantumCircuit`. + dynamic: Whether the circuit is dynamic (uses in-circuit conditionals) + job_tags: Tags to be assigned to the job. The tags can subsequently be used + as a filter in the :meth:`jobs()` function call. + init_circuit: A quantum circuit to execute for initializing qubits before each circuit. + If specified, ``init_num_resets`` is ignored. Applicable only if ``dynamic=True`` + is specified. + init_num_resets: The number of qubit resets to insert before each circuit execution. + + The following parameters are applicable only if ``dynamic=False`` is specified or + defaulted to. + + header: User input that will be attached to the job and will be + copied to the corresponding result header. Headers do not affect the run. 
+ This replaces the old ``Qobj`` header. + shots: Number of repetitions of each circuit, for sampling. Default: 4000 + or ``max_shots`` from the backend configuration, whichever is smaller. + memory: If ``True``, per-shot measurement bitstrings are returned as well + (provided the backend supports it). For OpenPulse jobs, only + measurement level 2 supports this option. + meas_level: Level of the measurement output for pulse experiments. See + `OpenPulse specification `_ for details: + + * ``0``, measurements of the raw signal (the measurement output pulse envelope) + * ``1``, measurement kernel is selected (a complex number obtained after applying the + measurement kernel to the measurement output signal) + * ``2`` (default), a discriminator is selected and the qubit state is stored (0 or 1) + + meas_return: Level of measurement data for the backend to return. For ``meas_level`` 0 and 1: + + * ``single`` returns information from every shot. + * ``avg`` returns average measurement output (averaged over number of shots). + + rep_delay: Delay between programs in seconds. Only supported on certain + backends (if ``backend.configuration().dynamic_reprate_enabled=True``). + If supported, ``rep_delay`` must be from the range supplied + by the backend (``backend.configuration().rep_delay_range``). Default is given by + ``backend.configuration().default_rep_delay``. + init_qubits: Whether to reset the qubits to the ground state for each shot. + Default: ``True``. + use_measure_esp: Whether to use excited state promoted (ESP) readout for measurements + which are the terminal instruction to a qubit. ESP readout can offer higher fidelity + than standard measurement sequences. See + `here `_. + Default: ``True`` if backend supports ESP readout, else ``False``. Backend support + for ESP readout is determined by the flag ``measure_esp_enabled`` in + ``backend.configuration()``. + noise_model: Noise model. (Simulators only) + seed_simulator: Random seed to control sampling. 
(Simulators only) + **run_config: Extra arguments used to configure the run. + + Returns: + The job to be executed. + + Raises: + IBMBackendApiError: If an unexpected error occurred while submitting + the job. + IBMBackendApiProtocolError: If an unexpected value received from + the server. + IBMBackendValueError: + - If an input parameter value is not valid. + - If ESP readout is used and the backend does not support this. + """ + # pylint: disable=arguments-differ + validate_job_tags(job_tags) + if not isinstance(circuits, List): + circuits = [circuits] + self._check_circuits_attributes(circuits) + + if use_measure_esp and getattr(self.configuration(), "measure_esp_enabled", False) is False: + raise IBMBackendValueError( + "ESP readout not supported on this device. Please make sure the flag " + "'use_measure_esp' is unset or set to 'False'." + ) + actually_dynamic = are_circuits_dynamic(circuits) + if dynamic is False and actually_dynamic: + warnings.warn( + "Parameter 'dynamic' is False, but the circuit contains dynamic constructs." + ) + dynamic = dynamic or actually_dynamic + + if dynamic and "qasm3" not in getattr(self.configuration(), "supported_features", []): + warnings.warn(f"The backend {self.name} does not support dynamic circuits.") + + status = self.status() + if status.operational is True and status.status_msg != "active": + warnings.warn(f"The backend {self.name} is currently paused.") + + program_id = str(run_config.get("program_id", "")) + if program_id: + run_config.pop("program_id", None) + else: + program_id = QASM3RUNNERPROGRAMID if dynamic else QOBJRUNNERPROGRAMID + + image: Optional[str] = run_config.get("image", None) # type: ignore + if image is not None: + image = str(image) + + if isinstance(init_circuit, bool): + raise IBMBackendApiError( + "init_circuit does not accept boolean values. " + "A quantum circuit should be passed in instead." 
+ ) + + if isinstance(shots, float): + shots = int(shots) + + run_config_dict = self._get_run_config( + program_id=program_id, + init_circuit=init_circuit, + init_num_resets=init_num_resets, + header=header, + shots=shots, + memory=memory, + meas_level=meas_level, + meas_return=meas_return, + rep_delay=rep_delay, + init_qubits=init_qubits, + use_measure_esp=use_measure_esp, + noise_model=noise_model, + seed_simulator=seed_simulator, + **run_config, + ) + + run_config_dict["circuits"] = circuits + + return self._runtime_run( + program_id=program_id, + inputs=run_config_dict, + backend_name=self.name, + job_tags=job_tags, + image=image, + ) + + def _runtime_run( + self, + program_id: str, + inputs: Dict, + backend_name: str, + job_tags: Optional[List[str]] = None, + image: Optional[str] = None, + ) -> RuntimeJob: + """Runs the runtime program and returns the corresponding job object""" + hgp_name = None + if self._service._channel == "ibm_quantum": + hgp_name = self._instance or self._service._get_hgp().name + + if self._session: + if not self._session.active: + raise RuntimeError(f"The session {self._session.session_id} is closed.") + session_id = self._session.session_id + start_session = session_id is None + else: + session_id = None + start_session = False + + log_level = getattr(self.options, "log_level", None) # temporary + try: + response = self._api_client.program_run( + program_id=program_id, + backend_name=backend_name, + params=inputs, + hgp=hgp_name, + log_level=log_level, + job_tags=job_tags, + session_id=session_id, + start_session=start_session, + image=image, + ) + except RequestsApiError as ex: + raise IBMBackendApiError("Error submitting job: {}".format(str(ex))) from ex + session_id = response.get("session_id", None) + if self._session: + self._session._session_id = session_id + try: + job = RuntimeJob( + backend=self, + api_client=self._api_client, + client_params=self._service._client_params, + job_id=response["id"], + program_id=program_id, + 
session_id=session_id, + service=self.service, + ) + logger.debug("Job %s was successfully submitted.", job.job_id()) + except TypeError as err: + logger.debug("Invalid job data received: %s", response) + raise IBMBackendApiProtocolError( + "Unexpected return value received from the server " + "when submitting job: {}".format(str(err)) + ) from err + Publisher().publish("ibm.job.start", job) + return job + + def _get_run_config(self, program_id: str, **kwargs: Any) -> Dict: + """Return the consolidated runtime configuration.""" + # Check if is a QASM3 like program id. + if program_id.startswith(QASM3RUNNERPROGRAMID): + fields = asdict(QASM3Options()).keys() + run_config_dict = QASM3Options().to_transport_dict() + else: + fields = asdict(QASM2Options()).keys() + run_config_dict = QASM2Options().to_transport_dict() + backend_options = self._options.__dict__ + for key, val in kwargs.items(): + if val is not None: + run_config_dict[key] = val + if key not in fields and not self.configuration().simulator: + warnings.warn( # type: ignore[unreachable] + f"{key} is not a recognized runtime option and may be ignored by the backend.", + stacklevel=4, + ) + elif backend_options.get(key) is not None and key in fields: + run_config_dict[key] = backend_options[key] + return run_config_dict + + def open_session(self) -> ProviderSession: + """Open session""" + self._session = ProviderSession() + return self._session + + @property + def session(self) -> ProviderSession: + """Return session""" + return self._session + + def cancel_session(self) -> None: + """Cancel session. 
All pending jobs will be cancelled.""" + if self._session: + self._session.cancel() + if self._session.session_id: + self._api_client.close_session(self._session.session_id) + + self._session = None + class IBMRetiredBackend(IBMBackend): """Backend class interfacing with an IBM Quantum device no longer available.""" diff --git a/qiskit_ibm_runtime/qiskit_runtime_service.py b/qiskit_ibm_runtime/qiskit_runtime_service.py index c2a14297e..67a4e2e08 100644 --- a/qiskit_ibm_runtime/qiskit_runtime_service.py +++ b/qiskit_ibm_runtime/qiskit_runtime_service.py @@ -34,6 +34,7 @@ from qiskit_ibm_provider.utils.backend_decoder import configuration_from_server_data from qiskit_ibm_runtime import ibm_backend +from .utils.utils import validate_job_tags from .accounts import AccountManager, Account, ChannelType from .api.clients import AuthClient, VersionClient from .api.clients.runtime import RuntimeClient @@ -1441,6 +1442,8 @@ def jobs( "The 'instance' keyword is only supported for ``ibm_quantum`` runtime." 
) hub, group, project = from_instance_format(instance) + if job_tags: + validate_job_tags(job_tags) job_responses = [] # type: List[Dict[str, Any]] current_page_limit = limit or 20 diff --git a/qiskit_ibm_runtime/runtime_job.py b/qiskit_ibm_runtime/runtime_job.py index 2e359d5e2..9e9a80356 100644 --- a/qiskit_ibm_runtime/runtime_job.py +++ b/qiskit_ibm_runtime/runtime_job.py @@ -27,9 +27,10 @@ from qiskit.providers.job import JobV1 as Job # pylint: disable=unused-import,cyclic-import -from qiskit_ibm_provider.utils import validate_job_tags, utc_to_local +from qiskit_ibm_provider.utils import utc_to_local from qiskit_ibm_runtime import qiskit_runtime_service +from .utils.utils import validate_job_tags from .constants import API_TO_JOB_ERROR_MESSAGE, API_TO_JOB_STATUS, DEFAULT_DECODERS from .exceptions import ( IBMApiError, @@ -40,12 +41,10 @@ RuntimeJobMaxTimeoutError, ) from .program.result_decoder import ResultDecoder -from .utils import RuntimeDecoder from .api.clients import RuntimeClient, RuntimeWebsocketClient, WebsocketClientCloseCode from .exceptions import IBMError from .api.exceptions import RequestsApiError from .api.client_parameters import ClientParameters -from .utils.utils import CallableStr logger = logging.getLogger(__name__) @@ -419,7 +418,7 @@ def update_tags(self, new_tags: List[str]) -> List[str]: with the server or updating the job tags. """ tags_to_update = set(new_tags) - validate_job_tags(new_tags, RuntimeInvalidStateError) + validate_job_tags(new_tags) response = self._api_client.update_tags(job_id=self.job_id(), tags=list(tags_to_update)) @@ -575,7 +574,8 @@ def _stream_results( traceback.format_exc(), ) - def _empty_result_queue(self, result_queue: queue.Queue) -> None: + @staticmethod + def _empty_result_queue(result_queue: queue.Queue) -> None: """Empty the result queue. 
Args: diff --git a/qiskit_ibm_runtime/runtime_options.py b/qiskit_ibm_runtime/runtime_options.py index eea530628..354ae1a28 100644 --- a/qiskit_ibm_runtime/runtime_options.py +++ b/qiskit_ibm_runtime/runtime_options.py @@ -103,4 +103,4 @@ def validate(self, channel: str) -> None: ) if self.job_tags: - validate_job_tags(self.job_tags, IBMInputValueError) + validate_job_tags(self.job_tags) diff --git a/qiskit_ibm_runtime/transpiler/__init__.py b/qiskit_ibm_runtime/transpiler/__init__.py new file mode 100644 index 000000000..d6e62daa4 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/__init__.py @@ -0,0 +1,31 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +==================================================================== +IBM Backend Transpiler Tools (:mod:`qiskit_ibm_provider.transpiler`) +==================================================================== + +A collection of transpiler tools for working with IBM Quantum's +next-generation backends that support advanced "dynamic circuit" +capabilities. Ie., circuits with support for classical +compute and control-flow/feedback based off of measurement results. + +Transpiler Passes +================== + +.. autosummary:: + :toctree: ../stubs/ + + passes + +""" diff --git a/qiskit_ibm_runtime/transpiler/passes/__init__.py b/qiskit_ibm_runtime/transpiler/passes/__init__.py new file mode 100644 index 000000000..2fe16514c --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/__init__.py @@ -0,0 +1,36 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. 
+# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +================================================================ +Transpiler Passes (:mod:`qiskit_ibm_provider.transpiler.passes`) +================================================================ + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes + +A collection of transpiler passes for IBM backends. + +.. autosummary:: + :toctree: ../stubs/ + + basis + scheduling + + +""" + +from .basis import ConvertIdToDelay + +# circuit scheduling +from .scheduling import ASAPScheduleAnalysis +from .scheduling import PadDynamicalDecoupling +from .scheduling import PadDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py b/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py new file mode 100644 index 000000000..0a71af010 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/basis/__init__.py @@ -0,0 +1,23 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +========================================================== +Basis (:mod:`qiskit_ibm_provider.transpiler.passes.basis`) +========================================================== + +.. 
currentmodule:: qiskit_ibm_provider.transpiler.passes.basis + +Passes to layout circuits to IBM backend's instruction sets. +""" + +from .convert_id_to_delay import ConvertIdToDelay diff --git a/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py b/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py new file mode 100644 index 000000000..3906d9046 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/basis/convert_id_to_delay.py @@ -0,0 +1,87 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Pass to convert Id gate operations to a delay instruction.""" + +from typing import Dict + +from qiskit.converters import dag_to_circuit, circuit_to_dag + +from qiskit.circuit import ControlFlowOp +from qiskit.circuit import Delay +from qiskit.circuit.library import IGate +from qiskit.dagcircuit import DAGCircuit +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.instruction_durations import InstructionDurations + + +class ConvertIdToDelay(TransformationPass): + """Convert :class:`qiskit.circuit.library.standard_gates.IGate` to + a delay of the corresponding length. + """ + + def __init__(self, durations: InstructionDurations, gate: str = "sx"): + """Convert :class:`qiskit.circuit.library.IGate` to a + Convert :class:`qiskit.circuit.Delay`. + + Args: + duration: Duration of the delay to replace the identity gate with. + gate: Single qubit gate to extract duration from. 
+ """ + self.durations = durations + self.gate = gate + self._cached_durations: Dict[int, int] = {} + + super().__init__() + + def run(self, dag: DAGCircuit) -> DAGCircuit: + self._run_inner(dag) + return dag + + def _run_inner(self, dag: DAGCircuit) -> bool: + """Run the pass on one :class:`.DAGCircuit`, mutating it. Returns ``True`` if the circuit + was modified and ``False`` if not.""" + modified = False + qubit_index_map = {bit: index for index, bit in enumerate(dag.qubits)} + for node in dag.op_nodes(): + if isinstance(node.op, ControlFlowOp): + modified_blocks = False + new_dags = [] + for block in node.op.blocks: + new_dag = circuit_to_dag(block) + modified_blocks |= self._run_inner(new_dag) + new_dags.append(new_dag) + if not modified_blocks: + continue + dag.substitute_node( + node, + node.op.replace_blocks(dag_to_circuit(block) for block in new_dags), + inplace=True, + ) + elif isinstance(node.op, IGate): + delay_op = Delay(self._get_duration(qubit_index_map[node.qargs[0]])) + dag.substitute_node(node, delay_op, inplace=True) + + modified = True + + return modified + + def _get_duration(self, qubit: int) -> int: + """Get the duration of a gate in dt.""" + duration = self._cached_durations.get(qubit, None) + if duration: + return duration + + duration = self.durations.get(self.gate, qubit) + self._cached_durations[qubit] = duration + + return duration diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py new file mode 100644 index 000000000..c3017e9bc --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/__init__.py @@ -0,0 +1,397 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +==================================================================== +Scheduling (:mod:`qiskit_ibm_provider.transpiler.passes.scheduling`) +==================================================================== + +.. currentmodule:: qiskit_ibm_provider.transpiler.passes.scheduling + +A collection of scheduling passes for working with IBM Quantum's next-generation +backends that support advanced "dynamic circuit" capabilities. Ie., +circuits with support for classical control-flow/feedback based off +of measurement results. + +.. warning:: + You should not mix these scheduling passes with Qiskit's builtin scheduling + passes as they will negatively interact with the scheduling routines for + dynamic circuits. This includes setting ``scheduling_method`` in + :func:`~qiskit.compiler.transpile` or + :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. + +Below we demonstrate how to schedule and pad a teleportation circuit with delays +for a dynamic circuit backend's execution model: + +.. jupyter-execute:: + + from qiskit.circuit import ClassicalRegister, QuantumCircuit, QuantumRegister + from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager + from qiskit.transpiler.passmanager import PassManager + + from qiskit_ibm_provider.transpiler.passes.scheduling import DynamicCircuitInstructionDurations + from qiskit_ibm_provider.transpiler.passes.scheduling import ALAPScheduleAnalysis + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDelay + from qiskit.providers.fake_provider import FakeJakarta + + + backend = FakeJakarta() + + # Temporary workaround for mock backends. For real backends this is not required. 
+ backend.configuration().basis_gates.append("if_else") + + + # Use this duration class to get appropriate durations for dynamic + # circuit backend scheduling + durations = DynamicCircuitInstructionDurations.from_backend(backend) + # Generate the main Qiskit transpile passes. + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + # Configure the as-late-as-possible scheduling pass + pm.scheduling = PassManager([ALAPScheduleAnalysis(durations), PadDelay()]) + + qr = QuantumRegister(3) + crz = ClassicalRegister(1, name="crz") + crx = ClassicalRegister(1, name="crx") + result = ClassicalRegister(1, name="result") + + teleport = QuantumCircuit(qr, crz, crx, result, name="Teleport") + + teleport.h(qr[1]) + teleport.cx(qr[1], qr[2]) + teleport.cx(qr[0], qr[1]) + teleport.h(qr[0]) + teleport.measure(qr[0], crz) + teleport.measure(qr[1], crx) + with teleport.if_test((crz, 1)): + teleport.z(qr[2]) + with teleport.if_test((crx, 1)): + teleport.x(qr[2]) + teleport.measure(qr[2], result) + + # Transpile. + scheduled_teleport = pm.run(teleport) + + scheduled_teleport.draw(output="mpl") + + +Instead of padding with delays we may also insert a dynamical decoupling sequence +using the :class:`PadDynamicalDecoupling` pass as shown below: + +.. jupyter-execute:: + + from qiskit.circuit.library import XGate + + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDynamicalDecoupling + + + dd_sequence = [XGate(), XGate()] + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + dd_teleport = pm.run(teleport) + + dd_teleport.draw(output="mpl") + +When compiling a circuit with Qiskit, it is more efficient and more robust to perform all the +transformations in a single transpilation. This has been done above by extending Qiskit's preset +pass managers. 
For example, Qiskit's :func:`~qiskit.compiler.transpile` function internally builds +its pass set by using :func:`~qiskit.transpiler.preset_passmanagers.generate_preset_pass_manager`. +This returns instances of :class:`~qiskit.transpiler.StagedPassManager`, which can be extended. + + +Scheduling old format ``c_if`` conditioned gates +------------------------------------------------ + +Scheduling with old format ``c_if`` conditioned gates is not supported. + +.. jupyter-execute:: + + qc_c_if = QuantumCircuit(1, 1) + qc_c_if.x(0).c_if(0, 1) + qc_c_if.draw(output="mpl") + +The :class:`.IBMBackend` configures a translation plugin +:class:`.IBMTranslationPlugin` to automatically +apply transformations and optimizations for IBM hardware backends when invoking +:func:`~qiskit.compiler.transpile`. This will automatically convert all old style ``c_if`` +conditioned gates to new-style control-flow. +We may then schedule the transpiled circuit without further modification. + +.. jupyter-execute:: + + # Temporary workaround for mock backends. For real backends this is not required. + backend.get_translation_stage_plugin = lambda: "ibm_dynamic_circuits" + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + qc_if_dd = pm.run(qc_c_if, backend) + qc_if_dd.draw(output="mpl") + + +If you are not using the transpiler plugin stages to +work around this please manually run the pass +:class:`qiskit.transpiler.passes.ConvertConditionsToIfOps` +prior to your scheduling pass. + +.. 
jupyter-execute:: + + from qiskit.transpiler.passes import ConvertConditionsToIfOps + + pm = generate_preset_pass_manager(optimization_level=1, backend=backend) + pm.scheduling = PassManager( + [ + ConvertConditionsToIfOps(), + ALAPScheduleAnalysis(durations), + PadDelay(), + ] + ) + + qc_if_dd = pm.run(qc_c_if) + qc_if_dd.draw(output="mpl") + + +Exploiting IBM backend's local parallel "fast-path" +--------------------------------------------------- + +IBM quantum hardware supports a localized "fast-path" which enables a block of gates +applied to a *single qubit* that are conditional on an immediately predecessor measurement +*of the same qubit* to be completed with lower latency. The hardware is also +able to do this in *parallel* on disjoint qubits that satisfy this condition. + +For example, the conditional gates below are performed in parallel with lower latency +as the measurements flow directly into the conditional blocks which in turn only apply +gates to the same measurement qubit. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + qc.x(0) + with qc.if_test((1, 1)): + qc.x(1) + + qc.draw(output="mpl") + + +The circuit below will not use the fast-path as the conditional gate is +on a different qubit than the measurement qubit. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(1) + + qc.draw(output="mpl") + +Similarly, the circuit below contains gates on multiple qubits +and will not be performed using the fast-path. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(0) + qc.x(1) + + qc.draw(output="mpl") + +A fast-path block may contain multiple gates as long as they are on the fast-path qubit. 
+If there are multiple fast-path blocks being performed in parallel each block will be +padded out to the duration of the longest block. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + qc.x(0) + # Will be padded out to a duration of 1600 on the backend. + with qc.if_test((1, 1)): + qc.delay(1600, 1) + + qc.draw(output="mpl") + +This behavior is also applied to the else condition of a fast-path eligible branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 1) + qc.measure(0, 0) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)) as else_: + qc.x(0) + # Will be padded out to a duration of 1600 on the backend. + with else_: + qc.delay(1600, 0) + + qc.draw(output="mpl") + + +If a single measurement result is used with several conditional blocks, if there is a fast-path +eligible block it will be applied followed by the non-fast-path blocks which will execute with +the standard higher latency conditional branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + # Conditional blocks will be performed in parallel in the hardware + with qc.if_test((0, 1)): + # Uses fast-path + qc.x(0) + with qc.if_test((0, 1)): + # Does not use fast-path + qc.x(1) + + qc.draw(output="mpl") + +If you wish to prevent the usage of the fast-path you may insert a barrier between the measurement and +the conditional branch. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 2) + qc.measure(0, 0) + # Barrier prevents the fast-path. + qc.barrier() + with qc.if_test((0, 1)): + qc.x(0) + + qc.draw(output="mpl") + +Conditional measurements are not eligible for the fast-path. + +.. 
jupyter-execute:: + + qc = QuantumCircuit(1, 2) + qc.measure(0, 0) + with qc.if_test((0, 1)): + # Does not use the fast-path + qc.measure(0, 1) + + qc.draw(output="mpl") + +Similarly nested control-flow is not eligible. + +.. jupyter-execute:: + + qc = QuantumCircuit(1, 1) + qc.measure(0, 0) + with qc.if_test((0, 1)): + # Does not use the fast-path + qc.x(0) + with qc.if_test((0, 1)): + qc.x(0) + + qc.draw(output="mpl") + + +The scheduler is aware of the fast-path behavior and will not insert delays on idle qubits +in blocks that satisfy the fast-path conditions so as to avoid preventing the backend +compiler from performing the necessary optimizations to utilize the fast-path. If +there are fast-path blocks that will be performed in parallel they currently *will not* +be padded out by the scheduler to ensure they are of the same duration in Qiskit + +.. jupyter-execute:: + + dd_sequence = [XGate(), XGate()] + + pm = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence), + ] + ) + + qc = QuantumCircuit(2, 2) + qc.measure(0, 0) + qc.measure(1, 1) + with qc.if_test((0, 1)): + qc.x(0) + # Is currently not padded to ensure + # a duration of 1000. If you desire + # this you would need to manually add + # qc.delay(840, 0) + with qc.if_test((1, 1)): + qc.delay(1000, 0) + + + qc.draw(output="mpl") + + qc_dd = pm.run(qc) + + qc_dd.draw(output="mpl") + +.. note:: + If there are qubits that are *not* involved in a fast-path decision it is not + currently possible to use them in a fast-path branch in parallel with the fast-path + qubits resulting from a measurement. This will be revised in the future as we + further improve these capabilities. + + For example: + + .. 
jupyter-execute:: + + qc = QuantumCircuit(3, 2) + qc.x(1) + qc.measure(0, 0) + with qc.if_test((0, 1)): + qc.x(0) + # Qubit 1 sits idle throughout the fast-path decision + with qc.if_test((1, 0)): + # Qubit 2 is idle but there is no measurement + # to make it fast-path eligible. This will + # however avoid a communication event in the hardware + # since the condition is compile time evaluated. + qc.x(2) + + qc.draw(output="mpl") + + +Scheduling & Dynamical Decoupling +================================= +.. autosummary:: + :toctree: ../stubs/ + + BlockBasePadder + ALAPScheduleAnalysis + ASAPScheduleAnalysis + DynamicCircuitInstructionDurations + PadDelay + PadDynamicalDecoupling +""" + +from .block_base_padder import BlockBasePadder +from .dynamical_decoupling import PadDynamicalDecoupling +from .pad_delay import PadDelay +from .scheduler import ALAPScheduleAnalysis, ASAPScheduleAnalysis +from .utils import DynamicCircuitInstructionDurations diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py new file mode 100644 index 000000000..1232750a5 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/block_base_padder.py @@ -0,0 +1,620 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Padding pass to fill timeslots for IBM (dynamic circuit) backends.""" + +from typing import Dict, Iterable, List, Optional, Union, Set + +from qiskit.circuit import ( + Qubit, + Clbit, + ControlFlowOp, + Gate, + IfElseOp, + Instruction, + Measure, +) +from qiskit.circuit.bit import Bit +from qiskit.circuit.library import Barrier +from qiskit.circuit.delay import Delay +from qiskit.circuit.parameterexpression import ParameterExpression +from qiskit.converters import dag_to_circuit +from qiskit.dagcircuit import DAGCircuit, DAGNode +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.exceptions import TranspilerError + +from .utils import block_order_op_nodes + + +class BlockBasePadder(TransformationPass): + """The base class of padding pass. + + This pass requires one of scheduling passes to be executed before itself. + Since there are multiple scheduling strategies, the selection of scheduling + pass is left in the hands of the pass manager designer. + Once a scheduling analysis pass is run, ``node_start_time`` is generated + in the :attr:`property_set`. This information is represented by a python dictionary of + the expected instruction execution times keyed on the node instances. + The padding pass expects all ``DAGOpNode`` in the circuit to be scheduled. + + This base class doesn't define any sequence to interleave, but it manages + the location where the sequence is inserted, and provides a set of information necessary + to construct the proper sequence. Thus, a subclass of this pass just needs to implement + :meth:`_pad` method, in which the subclass constructs a circuit block to insert. + This mechanism removes lots of boilerplate logic to manage whole DAG circuits. + + Note that padding pass subclasses should define interleaving sequences satisfying: + + - Interleaved sequence does not change start time of other nodes + - Interleaved sequence should have total duration of the provided ``time_interval``. 
+ + Any manipulation violating these constraints may prevent this base pass from correctly + tracking the start time of each instruction, + which may result in violation of hardware alignment constraints. + """ + + def __init__(self, schedule_idle_qubits: bool = False) -> None: + self._node_start_time = None + self._node_block_dags = None + self._idle_after: Optional[Dict[Qubit, int]] = None + self._root_dag = None + self._dag = None + self._block_dag = None + self._prev_node: Optional[DAGNode] = None + self._wire_map: Optional[Dict[Bit, Bit]] = None + self._block_duration = 0 + self._current_block_idx = 0 + self._conditional_block = False + self._bit_indices: Optional[Dict[Qubit, int]] = None + # Nodes that the scheduling of this node is tied to. + + self._last_node_to_touch: Optional[Dict[Qubit, DAGNode]] = None + # Last node to touch a bit + + self._fast_path_nodes: Set[DAGNode] = set() + + self._dirty_qubits: Set[Qubit] = set() + # Qubits that are dirty in the circuit. + self._schedule_idle_qubits = schedule_idle_qubits + self._idle_qubits: Set[Qubit] = set() + super().__init__() + + def run(self, dag: DAGCircuit) -> DAGCircuit: + """Run the padding pass on ``dag``. + + Args: + dag: DAG to be checked. + + Returns: + DAGCircuit: DAG with idle time filled with instructions. + + Raises: + TranspilerError: When a particular node is not scheduled, likely some transform pass + is inserted before this node is called. 
+ """ + if not self._schedule_idle_qubits: + self._idle_qubits = set(wire for wire in dag.idle_wires() if isinstance(wire, Qubit)) + self._pre_runhook(dag) + + self._init_run(dag) + + # Trivial wire map at the top-level + wire_map = {wire: wire for wire in dag.wires} + # Top-level dag is the entry block + new_dag = self._visit_block(dag, wire_map) + + return new_dag + + def _init_run(self, dag: DAGCircuit) -> None: + """Setup for initial run.""" + self._node_start_time = self.property_set["node_start_time"].copy() + self._node_block_dags = self.property_set["node_block_dags"] + self._idle_after = {bit: 0 for bit in dag.qubits} + self._current_block_idx = 0 + self._conditional_block = False + self._block_duration = 0 + + # Prepare DAG to pad + self._root_dag = dag + self._dag = self._empty_dag_like(dag) + self._block_dag = self._dag + self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} + self._last_node_to_touch = {} + self._fast_path_nodes = set() + self._dirty_qubits = set() + + self.property_set["node_start_time"].clear() + self._prev_node = None + self._wire_map = {} + + def _empty_dag_like( + self, + dag: DAGCircuit, + pad_wires: bool = True, + wire_map: Optional[Dict[Qubit, Qubit]] = None, + ignore_idle: bool = False, + ) -> DAGCircuit: + """Create an empty dag like the input dag.""" + new_dag = DAGCircuit() + + # Ensure *all* registers are included from the input circuit + # so that they are scheduled in sub-blocks + + # The top-level QuantumCircuit has the full registers available + # Control flow blocks do not get the full register added to the + # block but just the bits. When testing for equivalency the register + # information is taken into account. To work around this we try to + # while enabling generic handling of QuantumCircuits we + # add the register if available and otherwise add the bits directly. 
+ # We need this work around as otherwise the padded circuit will + # not be equivalent to one written manually as bits will not + # be defined on registers like in the test case. + + source_wire_dag = self._root_dag if pad_wires else dag + + # trivial wire map if not provided, or if the top-level dag is used + if not wire_map or pad_wires: + wire_map = {wire: wire for wire in source_wire_dag.wires} + if dag.qregs and self._schedule_idle_qubits or not ignore_idle: + for qreg in source_wire_dag.qregs.values(): + new_dag.add_qreg(qreg) + else: + new_dag.add_qubits( + [ + wire_map[qubit] + for qubit in source_wire_dag.qubits + if qubit not in self._idle_qubits or not ignore_idle + ] + ) + + # Don't add root cargs as these will not be padded. + # Just focus on current block dag. + if dag.cregs: + for creg in dag.cregs.values(): + new_dag.add_creg(creg) + else: + new_dag.add_clbits(dag.clbits) + + new_dag.name = dag.name + new_dag.metadata = dag.metadata + new_dag.unit = self.property_set["time_unit"] or "dt" + if new_dag.unit != "dt": + raise TranspilerError( + 'All blocks must have time units of "dt". ' + "Please run TimeUnitConversion pass prior to padding." + ) + + new_dag.calibrations = dag.calibrations + new_dag.global_phase = dag.global_phase + return new_dag + + def _pre_runhook(self, dag: DAGCircuit) -> None: + """Extra routine inserted before running the padding pass. + + Args: + dag: DAG circuit on which the sequence is applied. + + Raises: + TranspilerError: If the whole circuit or instruction is not scheduled. + """ + if "node_start_time" not in self.property_set: + raise TranspilerError( + f"The input circuit {dag.name} is not scheduled. Call one of scheduling passes " + f"before running the {self.__class__.__name__} pass." + ) + + def _pad( + self, + block_idx: int, + qubit: Qubit, + t_start: int, + t_end: int, + next_node: DAGNode, + prev_node: DAGNode, + ) -> None: + """Interleave instruction sequence in between two nodes. + + .. 
note:: + If a DAGOpNode is added here, it should update node_start_time property + in the property set so that the added node is also scheduled. + This is achieved by adding operation via :meth:`_apply_scheduled_op`. + + .. note:: + + This method doesn't check if the total duration of new DAGOpNode added here + is identical to the interval (``t_end - t_start``). + A developer of the pass must guarantee this is satisfied. + If the duration is greater than the interval, your circuit may be + compiled down to the target code with extra duration on the backend compiler, + which is then played normally without error. However, the outcome of your circuit + might be unexpected due to erroneous scheduling. + + Args: + block_idx: Execution block index for this node. + qubit: The wire that the sequence is applied on. + t_start: Absolute start time of this interval. + t_end: Absolute end time of this interval. + next_node: Node that follows the sequence. + prev_node: Node ahead of the sequence. + """ + raise NotImplementedError + + def _get_node_duration(self, node: DAGNode) -> int: + """Get the duration of a node.""" + if node.op.condition_bits or isinstance(node.op, ControlFlowOp): + # As we cannot currently schedule through conditionals model + # as zero duration to avoid padding. + return 0 + + indices = [self._bit_indices[qarg] for qarg in self._map_wires(node.qargs)] + + if self._block_dag.has_calibration_for(node): + # If node has calibration, this value should be the highest priority + cal_key = tuple(indices), tuple(float(p) for p in node.op.params) + duration = self._block_dag.calibrations[node.op.name][cal_key].duration + else: + duration = node.op.duration + + if isinstance(duration, ParameterExpression): + raise TranspilerError( + f"Parameterized duration ({duration}) " + f"of {node.op.name} on qubits {indices} is not bounded." 
+ ) + if duration is None: + raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") + + return duration + + def _needs_block_terminating_barrier(self, prev_node: DAGNode, curr_node: DAGNode) -> bool: + # Only barrier if not in fast-path nodes + is_fast_path_node = curr_node in self._fast_path_nodes + + def _is_terminating_barrier(node: DAGNode) -> bool: + return ( + isinstance(node.op, (Barrier, ControlFlowOp)) + and len(node.qargs) == self._block_dag.num_qubits() + ) + + return not ( + prev_node is None + or (isinstance(prev_node.op, ControlFlowOp) and isinstance(curr_node.op, ControlFlowOp)) + or _is_terminating_barrier(prev_node) + or _is_terminating_barrier(curr_node) + or is_fast_path_node + ) + + def _add_block_terminating_barrier( + self, block_idx: int, time: int, current_node: DAGNode, force: bool = False + ) -> None: + """Add a block terminating barrier to prevent topological ordering slide by. + + TODO: Fix by ensuring control-flow is a block terminator in the core circuit IR. 
+ """ + # Only add a barrier to the end if a viable barrier is not already present on all qubits + # Only barrier if not in fast-path nodes + needs_terminating_barrier = True + if not force: + needs_terminating_barrier = self._needs_block_terminating_barrier( + self._prev_node, current_node + ) + + if needs_terminating_barrier: + # Terminate with a barrier to ensure topological ordering does not slide past + if self._schedule_idle_qubits: + barrier = Barrier(self._block_dag.num_qubits()) + qubits = self._block_dag.qubits + else: + barrier = Barrier(self._block_dag.num_qubits() - len(self._idle_qubits)) + qubits = [x for x in self._block_dag.qubits if x not in self._idle_qubits] + + barrier_node = self._apply_scheduled_op( + block_idx, + time, + barrier, + qubits, + [], + ) + barrier_node.op.duration = 0 + + def _visit_block( + self, + block: DAGCircuit, + wire_map: Dict[Qubit, Qubit], + pad_wires: bool = True, + ignore_idle: bool = False, + ) -> DAGCircuit: + # Push the previous block dag onto the stack + prev_node = self._prev_node + self._prev_node = None + prev_wire_map, self._wire_map = self._wire_map, wire_map + + prev_block_dag = self._block_dag + self._block_dag = new_block_dag = self._empty_dag_like( + block, pad_wires, wire_map=wire_map, ignore_idle=ignore_idle + ) + + self._block_duration = 0 + self._conditional_block = False + + for node in block_order_op_nodes(block): + self._visit_node(node) + + # Terminate the block to pad it after scheduling. 
+ prev_block_duration = self._block_duration + prev_block_idx = self._current_block_idx + self._terminate_block(self._block_duration, self._current_block_idx) + + # Edge-case: Add a barrier if the final node is a fast-path + if self._prev_node in self._fast_path_nodes: + self._add_block_terminating_barrier( + prev_block_duration, prev_block_idx, self._prev_node, force=True + ) + + # Pop the previous block dag off the stack restoring it + self._block_dag = prev_block_dag + self._prev_node = prev_node + self._wire_map = prev_wire_map + + return new_block_dag + + def _visit_node(self, node: DAGNode) -> None: + if isinstance(node.op, ControlFlowOp): + if isinstance(node.op, IfElseOp): + self._visit_if_else_op(node) + else: + self._visit_control_flow_op(node) + elif node in self._node_start_time: + if isinstance(node.op, Delay): + self._visit_delay(node) + else: + self._visit_generic(node) + else: + raise TranspilerError( + f"Operation {repr(node)} is likely added after the circuit is scheduled. " + "Schedule the circuit again if you transformed it." + ) + self._prev_node = node + + def _visit_if_else_op(self, node: DAGNode) -> None: + """check if is fast-path eligible otherwise fall back + to standard ControlFlowOp handling.""" + + if self._will_use_fast_path(node): + self._fast_path_nodes.add(node) + self._visit_control_flow_op(node) + + def _will_use_fast_path(self, node: DAGNode) -> bool: + """Check if this conditional operation will be scheduled on the fastpath. + This will happen if + 1. This operation is a direct descendent of a current measurement block to be flushed + 2. The operation only operates on the qubit that is measured. + """ + # Verify IfElseOp has a direct measurement predecessor + condition_bits = node.op.condition_bits + # Fast-path valid only with a single bit. 
+ if not condition_bits or len(condition_bits) > 1: + return False + + bit = condition_bits[0] + last_node, last_node_dag = self._last_node_to_touch.get(bit, (None, None)) + + last_node_in_block = last_node_dag is self._block_dag + + if not ( + last_node_in_block + and isinstance(last_node.op, Measure) + and set(self._map_wires(node.qargs)) == set(self._map_wires(last_node.qargs)) + ): + return False + + # Fast path contents are limited to gates and delays + for block in node.op.blocks: + if not all(isinstance(inst.operation, (Gate, Delay)) for inst in block.data): + return False + return True + + def _visit_control_flow_op(self, node: DAGNode) -> None: + """Visit a control-flow node to pad.""" + + # Control-flow terminator ends scheduling of block currently + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + self._terminate_block(t0, block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + # Only pad non-fast path nodes + fast_path_node = node in self._fast_path_nodes + + # TODO: This is a hack required to tie nodes of control-flow + # blocks across the scheduler and block_base_padder. This is + # because the current control flow nodes store the block as a + # circuit which is not hashable. For processing we are currently + # required to convert each circuit block to a dag which is inefficient + # and causes node relationships stored in analysis to be lost between + # passes as we are constantly recreating the block dags. + # We resolve this here by extracting the cached dag blocks that were + # stored by the scheduling pass. 
+ new_node_block_dags = []
+ for block_idx, _ in enumerate(node.op.blocks):
+ block_dag = self._node_block_dags[node][block_idx]
+ inner_wire_map = {
+ inner: outer
+ for outer, inner in zip(
+ self._map_wires(node.qargs + node.cargs),
+ block_dag.qubits + block_dag.clbits,
+ )
+ }
+ new_node_block_dags.append(
+ self._visit_block(
+ block_dag,
+ pad_wires=not fast_path_node,
+ wire_map=inner_wire_map,
+ ignore_idle=True,
+ )
+ )
+
+ # Build new control-flow operation containing scheduled blocks
+ # and apply to the DAG.
+ new_control_flow_op = node.op.replace_blocks(
+ dag_to_circuit(block) for block in new_node_block_dags
+ )
+ # Enforce that this control-flow operation contains all wires since it has now been padded
+ # such that each qubit is scheduled within each block. Don't add all cargs as these will not
+ # be padded.
+ if fast_path_node:
+ padded_qubits = node.qargs
+ elif not self._schedule_idle_qubits:
+ padded_qubits = [q for q in self._block_dag.qubits if q not in self._idle_qubits]
+ else:
+ padded_qubits = self._block_dag.qubits
+ self._apply_scheduled_op(
+ block_idx,
+ t0,
+ new_control_flow_op,
+ padded_qubits,
+ self._map_wires(node.cargs),
+ )
+
+ def _visit_delay(self, node: DAGNode) -> None:
+ """The padding class considers a delay instruction as idle time
+ rather than instruction. Delay node is not added so that
+ we can extract non-delay predecessors.
+ """ + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + # Trigger the end of a block + if block_idx > self._current_block_idx: + self._terminate_block(self._block_duration, self._current_block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + self._conditional_block = bool(node.op.condition_bits) + + self._current_block_idx = block_idx + + t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name + self._block_duration = max(self._block_duration, t1) + + def _visit_generic(self, node: DAGNode) -> None: + """Visit a generic node to pad.""" + # Note: t0 is the relative time with respect to the current block specified + # by block_idx. + block_idx, t0 = self._node_start_time[node] # pylint: disable=invalid-name + + # Trigger the end of a block + if block_idx > self._current_block_idx: + self._terminate_block(self._block_duration, self._current_block_idx) + self._add_block_terminating_barrier(block_idx, t0, node) + + # This block will not be padded as it is conditional. + # See TODO below. + self._conditional_block = bool(node.op.condition_bits) + + # Now set the current block index. + self._current_block_idx = block_idx + + t1 = t0 + self._get_node_duration(node) # pylint: disable=invalid-name + self._block_duration = max(self._block_duration, t1) + + for bit in self._map_wires(node.qargs): + if bit in self._idle_qubits: + continue + # Fill idle time with some sequence + if t0 - self._idle_after.get(bit, 0) > 0: + # Find previous node on the wire, i.e. 
always the latest node on the wire
+ prev_node = next(self._block_dag.predecessors(self._block_dag.output_map[bit]))
+ self._pad(
+ block_idx=block_idx,
+ qubit=bit,
+ t_start=self._idle_after[bit],
+ t_end=t0,
+ next_node=node,
+ prev_node=prev_node,
+ )
+
+ self._idle_after[bit] = t1
+
+ if not isinstance(node.op, (Barrier, Delay)):
+ self._dirty_qubits |= set(self._map_wires(node.qargs))
+
+ new_node = self._apply_scheduled_op(
+ block_idx,
+ t0,
+ node.op,
+ self._map_wires(node.qargs),
+ self._map_wires(node.cargs),
+ )
+ self._last_node_to_touch.update(
+ {bit: (new_node, self._block_dag) for bit in new_node.qargs + new_node.cargs}
+ )
+
+ def _terminate_block(self, block_duration: int, block_idx: int) -> None:
+ """Terminate the end of a block scheduling region."""
+ # Update all other qubits as not idle so that delays are *not*
+ # inserted. This is because we need the delays to be inserted in
+ # the conditional circuit block.
+ self._block_duration = 0
+ self._pad_until_block_end(block_duration, block_idx)
+ self._idle_after = {bit: 0 for bit in self._block_dag.qubits}
+
+ def _pad_until_block_end(self, block_duration: int, block_idx: int) -> None:
+ # Add delays until the end of circuit.
+ for bit in self._block_dag.qubits:
+ if bit in self._idle_qubits:
+ continue
+ idle_after = self._idle_after.get(bit, 0)
+ if block_duration - idle_after > 0:
+ node = self._block_dag.output_map[bit]
+ prev_node = next(self._block_dag.predecessors(node))
+ self._pad(
+ block_idx=block_idx,
+ qubit=bit,
+ t_start=idle_after,
+ t_end=block_duration,
+ next_node=node,
+ prev_node=prev_node,
+ )
+
+ def _apply_scheduled_op(
+ self,
+ block_idx: int,
+ t_start: int,
+ oper: Instruction,
+ qubits: Union[Qubit, Iterable[Qubit]],
+ clbits: Union[Clbit, Iterable[Clbit]] = (),
+ ) -> DAGNode:
+ """Add new operation to DAG with scheduled information.
+
+ This is identical to apply_operation_back + updating the node_start_time property.
+ + Args: + block_idx: Execution block index for this node. + t_start: Start time of new node. + oper: New operation that is added to the DAG circuit. + qubits: The list of qubits that the operation acts on. + clbits: The list of clbits that the operation acts on. + + Returns: + The DAGNode applied to. + """ + if isinstance(qubits, Qubit): + qubits = [qubits] + if isinstance(clbits, Clbit): + clbits = [clbits] + + new_node = self._block_dag.apply_operation_back(oper, qubits, clbits) + self.property_set["node_start_time"][new_node] = (block_idx, t_start) + return new_node + + def _map_wires(self, wires: Iterable[Bit]) -> List[Bit]: + """Map the wires from the current block to the top-level block's wires. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + return [self._wire_map[w] for w in wires] diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py new file mode 100644 index 000000000..006c53feb --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/dynamical_decoupling.py @@ -0,0 +1,553 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Dynamical decoupling insertion pass for IBM (dynamic circuit) backends.""" + +import warnings +from typing import Dict, List, Optional, Union + +import numpy as np +import rustworkx as rx +from qiskit.circuit import Qubit, Gate +from qiskit.circuit.delay import Delay +from qiskit.circuit.library.standard_gates import IGate, UGate, U3Gate +from qiskit.circuit.reset import Reset +from qiskit.dagcircuit import DAGCircuit, DAGNode, DAGInNode, DAGOpNode +from qiskit.quantum_info.operators.predicates import matrix_equal +from qiskit.quantum_info.synthesis import OneQubitEulerDecomposer +from qiskit.transpiler.exceptions import TranspilerError +from qiskit.transpiler.instruction_durations import InstructionDurations +from qiskit.transpiler.passes.optimization import Optimize1qGates +from qiskit.transpiler import CouplingMap + +from .block_base_padder import BlockBasePadder + + +class PadDynamicalDecoupling(BlockBasePadder): + """Dynamical decoupling insertion pass for IBM dynamic circuit backends. + + This pass works on a scheduled, physical circuit. It scans the circuit for + idle periods of time (i.e. those containing delay instructions) and inserts + a DD sequence of gates in those spots. These gates amount to the identity, + so do not alter the logical action of the circuit, but have the effect of + mitigating decoherence in those idle periods. + As a special case, the pass allows a length-1 sequence (e.g. [XGate()]). + In this case the DD insertion happens only when the gate inverse can be + absorbed into a neighboring gate in the circuit (so we would still be + replacing Delay with something that is equivalent to the identity). + This can be used, for instance, as a Hahn echo. + This pass ensures that the inserted sequence preserves the circuit exactly + (including global phase). + + .. 
jupyter-execute:: + + import numpy as np + from qiskit.circuit import QuantumCircuit + from qiskit.circuit.library import XGate + from qiskit.transpiler import PassManager, InstructionDurations + from qiskit.visualization import timeline_drawer + + from qiskit_ibm_provider.transpiler.passes.scheduling import ALAPScheduleAnalysis + from qiskit_ibm_provider.transpiler.passes.scheduling import PadDynamicalDecoupling + + circ = QuantumCircuit(4) + circ.h(0) + circ.cx(0, 1) + circ.cx(1, 2) + circ.cx(2, 3) + circ.measure_all() + durations = InstructionDurations( + [("h", 0, 50), ("cx", [0, 1], 700), ("reset", None, 10), + ("cx", [1, 2], 200), ("cx", [2, 3], 300), + ("x", None, 50), ("measure", None, 1000)] + ) + + .. jupyter-execute:: + + # balanced X-X sequence on all qubits + dd_sequence = [XGate(), XGate()] + pm = PassManager([ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence)]) + circ_dd = pm.run(circ) + circ_dd.draw() + + .. jupyter-execute:: + + # Uhrig sequence on qubit 0 + n = 8 + dd_sequence = [XGate()] * n + def uhrig_pulse_location(k): + return np.sin(np.pi * (k + 1) / (2 * n + 2)) ** 2 + spacings = [] + for k in range(n): + spacings.append(uhrig_pulse_location(k) - sum(spacings)) + spacings.append(1 - sum(spacings)) + pm = PassManager( + [ + ALAPScheduleAnalysis(durations), + PadDynamicalDecoupling(durations, dd_sequence, qubits=[0], spacings=spacings), + ] + ) + circ_dd = pm.run(circ) + circ_dd.draw() + + .. note:: + + You need to call + :class:`~qiskit_ibm_provider.transpiler.passes.scheduling.ALAPScheduleAnalysis` + before running dynamical decoupling to guarantee your circuit satisfies acquisition + alignment constraints for dynamic circuit backends. 
+ """ + + def __init__( + self, + durations: InstructionDurations, + dd_sequences: Union[List[Gate], List[List[Gate]]], + qubits: Optional[List[int]] = None, + spacings: Optional[Union[List[List[float]], List[float]]] = None, + skip_reset_qubits: bool = True, + pulse_alignment: int = 16, + extra_slack_distribution: str = "middle", + sequence_min_length_ratios: Optional[Union[int, List[int]]] = None, + insert_multiple_cycles: bool = False, + coupling_map: CouplingMap = None, + alt_spacings: Optional[Union[List[List[float]], List[float]]] = None, + schedule_idle_qubits: bool = False, + ): + """Dynamical decoupling initializer. + + Args: + durations: Durations of instructions to be used in scheduling. + dd_sequences: Sequence of gates to apply in idle spots. + Alternatively a list of gate sequences may be supplied that + will preferentially be inserted if there is a delay of sufficient + duration. This may be tuned by the optionally supplied + ``sequence_min_length_ratios``. + qubits: Physical qubits on which to apply DD. + If None, all qubits will undergo DD (when possible). + spacings: A list of lists of spacings between the DD gates. + The available slack will be divided according to this. + The list length must be one more than the length of dd_sequence, + and the elements must sum to 1. If None, a balanced spacing + will be used [d/2, d, d, ..., d, d, d/2]. This spacing only + applies to the first subcircuit, if a ``coupling_map`` is + specified + skip_reset_qubits: If True, does not insert DD on idle periods that + immediately follow initialized/reset qubits + (as qubits in the ground state are less susceptible to decoherence). + pulse_alignment: The hardware constraints for gate timing allocation. + This is usually provided from ``backend.configuration().timing_constraints``. + If provided, the delay length, i.e. ``spacing``, is implicitly adjusted to + satisfy this constraint. 
+ extra_slack_distribution: The option to control the behavior of DD sequence generation.
+ The duration of the DD sequence should be identical to an idle time in the
+ scheduled quantum circuit, however, the delay in between gates comprising the sequence
+ should be integer number in units of dt, and it might be further truncated
+ when ``pulse_alignment`` is specified. This sometimes results in the duration of
+ the created sequence being shorter than the idle time
+ that you want to fill with the sequence, i.e. `extra slack`.
+ This option takes the following values.
+
+ * "middle": Put the extra slack to the interval at the middle of the sequence.
+ * "edges": Divide the extra slack as evenly as possible into
+ intervals at beginning and end of the sequence.
+ sequence_min_length_ratios: List of minimum delay length to DD sequence ratio to satisfy
+ in order to insert the DD sequence. For example if the X-X dynamical decoupling sequence
+ is 320dt samples long and the available delay is 384dt it has a ratio of 384dt/320dt=1.2.
+ From the perspective of dynamical decoupling this is likely to add more control noise
+ than decoupling error rate reductions. The default value is 2.0.
+ insert_multiple_cycles: If the available duration exceeds
+ 2*sequence_min_length_ratio*duration(dd_sequence) enable the insertion of multiple
+ rounds of the dynamical decoupling sequence in that delay.
+ coupling_map: directed graph representing the coupling map for the device. Specifying a
+ coupling map partitions the device into subcircuits, in order to apply DD sequences
+ with different pulse spacings within each. Currently supports 2 subcircuits.
+ alt_spacings: A list of lists of spacings between the DD gates, for the second subcircuit,
+ as determined by the coupling map. If None, a balanced spacing that is staggered with
+ respect to the first subcircuit will be used [d, d, d, ..., d, d, 0].
+ schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits.
+ This is useful for timeline visualizations, but may cause issues + for execution on large backends. + Raises: + TranspilerError: When invalid DD sequence is specified. + TranspilerError: When pulse gate with the duration which is + non-multiple of the alignment constraint value is found. + TranspilerError: When the coupling map is not supported (i.e., if degree > 3) + """ + + super().__init__(schedule_idle_qubits=schedule_idle_qubits) + self._durations = durations + + # Enforce list of DD sequences + if dd_sequences: + try: + iter(dd_sequences[0]) + except TypeError: + dd_sequences = [dd_sequences] + self._dd_sequences = dd_sequences + self._qubits = qubits + self._skip_reset_qubits = skip_reset_qubits + self._alignment = pulse_alignment + self._coupling_map = coupling_map + self._coupling_coloring = None + + if spacings is not None: + try: + iter(spacings[0]) # type: ignore + except TypeError: + spacings = [spacings] # type: ignore + if alt_spacings is not None: + try: + iter(alt_spacings[0]) # type: ignore + except TypeError: + alt_spacings = [alt_spacings] # type: ignore + self._spacings = spacings + self._alt_spacings = alt_spacings + + if self._spacings and len(self._spacings) != len(self._dd_sequences): + raise TranspilerError("Number of sequence spacings must equal number of DD sequences.") + + if self._alt_spacings: + if not self._coupling_map: + warnings.warn( + "Alternate spacings are ignored because a coupling map was not provided" + ) + elif len(self._alt_spacings) != len(self._dd_sequences): + raise TranspilerError( + "Number of alternate sequence spacings must equal number of DD sequences." 
+ ) + + self._extra_slack_distribution = extra_slack_distribution + + self._dd_sequence_lengths: Dict[Qubit, List[List[Gate]]] = {} + self._sequence_phase = 0 + + if sequence_min_length_ratios is None: + # Use 2.0 as a sane default + self._sequence_min_length_ratios = [2.0 for _ in self._dd_sequences] + else: + try: + iter(sequence_min_length_ratios) # type: ignore + except TypeError: + sequence_min_length_ratios = [sequence_min_length_ratios] # type: ignore + self._sequence_min_length_ratios = sequence_min_length_ratios # type: ignore + + if len(self._sequence_min_length_ratios) != len(self._dd_sequences): + raise TranspilerError("Number of sequence lengths must equal number of DD sequences.") + + self._insert_multiple_cycles = insert_multiple_cycles + + def _pre_runhook(self, dag: DAGCircuit) -> None: + super()._pre_runhook(dag) + + if self._coupling_map: + physical_qubits = [dag.qubits.index(q) for q in dag.qubits] + subgraph = self._coupling_map.graph.subgraph(physical_qubits) + self._coupling_coloring = rx.graph_greedy_color(subgraph.to_undirected()) + if any(c > 1 for c in self._coupling_coloring.values()): + raise TranspilerError( + "This circuit topology is not supported for staggered dynamical decoupling." + "The maximum connectivity is 3 nearest neighbors per qubit." 
+ ) + + spacings_required = self._spacings is None + if spacings_required: + self._spacings = [] # type: ignore + alt_spacings_required = self._alt_spacings is None and self._coupling_map is not None + if alt_spacings_required: + self._alt_spacings = [] # type: ignore + + for seq_idx, seq in enumerate(self._dd_sequences): + num_pulses = len(self._dd_sequences[seq_idx]) + + # Check if physical circuit is given + if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: + raise TranspilerError("DD runs on physical circuits only.") + + # Set default spacing otherwise validate user input + if spacings_required: + mid = 1 / num_pulses + end = mid / 2 + self._spacings.append([end] + [mid] * (num_pulses - 1) + [end]) # type: ignore + else: + if sum(self._spacings[seq_idx]) != 1 or any( # type: ignore + a < 0 for a in self._spacings[seq_idx] # type: ignore + ): + raise TranspilerError( + "The spacings must be given in terms of fractions " + "of the slack period and sum to 1." + ) + + if self._coupling_map: + if alt_spacings_required: + mid = 1 / num_pulses + self._alt_spacings.append([mid] * num_pulses + [0]) # type: ignore + else: + if sum(self._alt_spacings[seq_idx]) != 1 or any( # type: ignore + a < 0 for a in self._alt_spacings[seq_idx] # type: ignore + ): + raise TranspilerError( + "The spacings must be given in terms of fractions " + "of the slack period and sum to 1." + ) + + # Check if DD sequence is identity + if num_pulses != 1: + if num_pulses % 2 != 0: + raise TranspilerError( + "DD sequence must contain an even number of gates (or 1)." + ) + # TODO: this check should use the quantum info package in Qiskit. 
+ noop = np.eye(2) + for gate in self._dd_sequences[seq_idx]: + noop = noop.dot(gate.to_matrix()) + if not matrix_equal(noop, IGate().to_matrix(), ignore_phase=True): + raise TranspilerError("The DD sequence does not make an identity operation.") + self._sequence_phase = np.angle(noop[0][0]) + + # Precompute qubit-wise DD sequence length for performance + for qubit in dag.qubits: + seq_length_ = [] + if qubit not in self._dd_sequence_lengths: + self._dd_sequence_lengths[qubit] = [] + + physical_index = dag.qubits.index(qubit) + if self._qubits and physical_index not in self._qubits: + continue + + for index, gate in enumerate(seq): + try: + # Check calibration. + gate_length = dag.calibrations[gate.name][(physical_index, gate.params)] + if gate_length % self._alignment != 0: + # This is necessary to implement lightweight scheduling logic for this pass. + # Usually the pulse alignment constraint and pulse data chunk size take + # the same value, however, we can intentionally violate this pattern + # at the gate level. For example, we can create a schedule consisting of + # a pi-pulse of 32 dt followed by a post buffer, i.e. delay, of 4 dt + # on the device with 16 dt constraint. Note that the pi-pulse length + # is multiple of 16 dt but the gate length of 36 is not multiple of it. + # Such pulse gate should be excluded. + raise TranspilerError( + f"Pulse gate {gate.name} with length non-multiple of {self._alignment} " + f"is not acceptable in {self.__class__.__name__} pass." + ) + except KeyError: + gate_length = self._durations.get(gate, physical_index) + seq_length_.append(gate_length) + # Update gate duration. + # This is necessary for current timeline drawer, i.e. scheduled. 
+
+ if hasattr(
+ gate, "to_mutable"
+ ): # TODO this check can be removed after Qiskit 1.0, as it is always True
+ gate = gate.to_mutable()
+ seq[index] = gate
+ gate.duration = gate_length
+ self._dd_sequence_lengths[qubit].append(seq_length_)
+
+ def _pad(
+ self,
+ block_idx: int,
+ qubit: Qubit,
+ t_start: int,
+ t_end: int,
+ next_node: DAGNode,
+ prev_node: DAGNode,
+ ) -> None:
+ # This routine takes care of the pulse alignment constraint for the DD sequence.
+ # Note that the alignment constraint acts on the t0 of the DAGOpNode.
+ # Now this constrained scheduling problem is simplified to the problem of
+ # finding a delay amount which is a multiple of the constraint value by assuming
+ # that the duration of every DAGOpNode is also a multiple of the constraint value.
+ #
+ # For example, given the constraint value of 16 and XY4 with 160 dt gates.
+ # Here we assume current interval is 992 dt.
+ #
+ # relative spacing := [0.125, 0.25, 0.25, 0.25, 0.125]
+ # slack = 992 dt - 4 x 160 dt = 352 dt
+ #
+ # unconstrained sequence: 44dt-X1-88dt-Y2-88dt-X3-88dt-Y4-44dt
+ # constrained sequence : 32dt-X1-80dt-Y2-80dt-X3-80dt-Y4-32dt + extra slack 48 dt
+ #
+ # Now we evenly split extra slack into start and end of the sequence.
+ # The distributed slack should be multiple of 16.
+ # Start = +16, End += 32
+ #
+ # final sequence : 48dt-X1-80dt-Y2-80dt-X3-80dt-Y4-64dt / in total 992 dt
+ #
+ # Now we verify t0 of every node starts from multiple of 16 dt.
+ #
+ # X1: 48 dt (3 x 16 dt)
+ # Y2: 48 dt + 160 dt + 80 dt = 288 dt (18 x 16 dt)
+ # Y3: 288 dt + 160 dt + 80 dt = 528 dt (33 x 16 dt)
+ # Y4: 368 dt + 160 dt + 80 dt = 768 dt (48 x 16 dt)
+ #
+ # As you can see, constraints on t0 are all satisfied without explicit scheduling.
+ time_interval = t_end - t_start
+
+ if self._qubits and self._block_dag.qubits.index(qubit) not in self._qubits:
+ # Target physical qubit is not the target of this DD sequence.
+ self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return + + if ( + not isinstance(prev_node, DAGInNode) + and self._skip_reset_qubits + and isinstance(prev_node.op, Reset) + and qubit in prev_node.qargs + ): + self._dirty_qubits.remove(qubit) + + if qubit not in self._dirty_qubits: + # Previous node is the start edge or reset, i.e. qubit is ground state. + self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return + + for sequence_idx, _ in enumerate(self._dd_sequences): + dd_sequence = self._dd_sequences[sequence_idx] + seq_lengths = self._dd_sequence_lengths[qubit][sequence_idx] + seq_length = np.sum(seq_lengths) + seq_ratio = self._sequence_min_length_ratios[sequence_idx] + spacings = self._spacings[sequence_idx] + alt_spacings = ( + np.asarray(self._alt_spacings[sequence_idx]) if self._coupling_map else None + ) + + # Verify the delay duration exceeds the minimum time to insert + if time_interval / seq_length <= seq_ratio: + continue + + if self._insert_multiple_cycles: + num_sequences = max(int(time_interval // (seq_length * seq_ratio)), 1) + if (num_sequences % 2 == 1) and len(dd_sequence) == 1: + warnings.warn( + "Sequence would result in an odd number of DD cycles with original DD " + "sequence of length 1. This may result in non-identity sequence insertion " + "and so are defaulting to 1 cycle insertion." 
+ ) + num_sequences = 1 + else: + num_sequences = 1 + + # multiple dd sequences may be inserted + if num_sequences > 1: + dd_sequence = list(dd_sequence) * num_sequences + seq_lengths = seq_lengths * num_sequences + seq_length = np.sum(seq_lengths) + spacings = spacings * num_sequences + + spacings = np.asarray(spacings) / num_sequences + slack = time_interval - seq_length + sequence_gphase = self._sequence_phase + + if slack <= 0: + continue + + if len(dd_sequence) == 1: + # Special case of using a single gate for DD + u_inv = dd_sequence[0].inverse().to_matrix() + theta, phi, lam, phase = OneQubitEulerDecomposer().angles_and_phase(u_inv) + if isinstance(next_node, DAGOpNode) and isinstance(next_node.op, (UGate, U3Gate)): + # Absorb the inverse into the successor (from left in circuit) + theta_r, phi_r, lam_r = next_node.op.params + next_node.op.params = Optimize1qGates.compose_u3( + theta_r, phi_r, lam_r, theta, phi, lam + ) + sequence_gphase += phase + elif isinstance(prev_node, DAGOpNode) and isinstance(prev_node.op, (UGate, U3Gate)): + # Absorb the inverse into the predecessor (from right in circuit) + theta_l, phi_l, lam_l = prev_node.op.params + prev_node.op.params = Optimize1qGates.compose_u3( + theta, phi, lam, theta_l, phi_l, lam_l + ) + sequence_gphase += phase + else: + # Don't do anything if there's no single-qubit gate to absorb the inverse + self._apply_scheduled_op( + block_idx, + t_start, + Delay(time_interval, self._block_dag.unit), + qubit, + ) + return + + def _constrained_length(values: np.array) -> np.array: + return self._alignment * np.floor(values / self._alignment) + + if self._coupling_map: + if self._coupling_coloring[self._dag.qubits.index(qubit)] == 0: + sub_spacings = spacings + else: + sub_spacings = alt_spacings + else: + sub_spacings = spacings + + # (1) Compute DD intervals satisfying the constraint + taus = _constrained_length(slack * sub_spacings) + extra_slack = slack - np.sum(taus) + # (2) Distribute extra slack + if 
self._extra_slack_distribution == "middle": + mid_ind = int((len(taus) - 1) / 2) + to_middle = _constrained_length(extra_slack) + taus[mid_ind] += to_middle + if extra_slack - to_middle: + # If to_middle is not a multiple value of the pulse alignment, + # it is truncated to the nearest multiple value and + # the rest of slack is added to the end. + taus[-1] += extra_slack - to_middle + elif self._extra_slack_distribution == "edges": + to_begin_edge = _constrained_length(extra_slack / 2) + taus[0] += to_begin_edge + taus[-1] += extra_slack - to_begin_edge + else: + raise TranspilerError( + f"Option extra_slack_distribution = {self._extra_slack_distribution} is invalid." + ) + + # (3) Construct DD sequence with delays + idle_after = t_start + dd_ind = 0 + # Interleave delays with DD sequence operations + for tau_idx, tau in enumerate(taus): + if tau > 0: + self._apply_scheduled_op( + block_idx, idle_after, Delay(tau, self._dag.unit), qubit + ) + idle_after += tau + + # Detect if we are on a sequence boundary + # If so skip insert of sequence to allow delays to combine + # There are two cases. + # 1. The number of delays to be inserted is equal to the number of gates. + # 2. There is an extra delay inserted after the last operation. + # The condition below handles both. 
+ seq_length = int(len(taus) / num_sequences) + if len(dd_sequence) == len(taus) or tau_idx % seq_length != (seq_length - 1): + gate = dd_sequence[dd_ind] + gate_length = seq_lengths[dd_ind] + self._apply_scheduled_op(block_idx, idle_after, gate, qubit) + idle_after += gate_length + dd_ind += 1 + + self._block_dag.global_phase = self._block_dag.global_phase + sequence_gphase + return + + # DD could not be applied, delay instead + self._apply_scheduled_op( + block_idx, t_start, Delay(time_interval, self._block_dag.unit), qubit + ) + return diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py new file mode 100644 index 000000000..fd61f8c49 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/pad_delay.py @@ -0,0 +1,78 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Padding pass to insert Delay into empty timeslots for dynamic circuit backends.""" + +from qiskit.circuit import Qubit +from qiskit.circuit.delay import Delay +from qiskit.dagcircuit import DAGNode, DAGOutNode + +from .block_base_padder import BlockBasePadder + + +class PadDelay(BlockBasePadder): + """Padding idle time with Delay instructions. + + Consecutive delays will be merged in the output of this pass. + + .. code-block::python + + durations = InstructionDurations([("x", None, 160), ("cx", None, 800)]) + + qc = QuantumCircuit(2) + qc.delay(100, 0) + qc.x(1) + qc.cx(0, 1) + + The ASAP-scheduled circuit output may become + + .. 
parsed-literal:: + + ┌────────────────┐ + q_0: ┤ Delay(160[dt]) ├──■── + └─────┬───┬──────┘┌─┴─┐ + q_1: ──────┤ X ├───────┤ X ├ + └───┘ └───┘ + + Note that the additional idle time of 60dt on the ``q_0`` wire coming from the duration difference + between ``Delay`` of 100dt (``q_0``) and ``XGate`` of 160 dt (``q_1``) is absorbed in + the delay instruction on the ``q_0`` wire, i.e. in total 160 dt. + + See :class:`BlockBasePadder` pass for details. + """ + + def __init__(self, fill_very_end: bool = True, schedule_idle_qubits: bool = False): + """Create new padding delay pass. + + Args: + fill_very_end: Set ``True`` to fill the end of circuit with delay. + schedule_idle_qubits: Set to true if you'd like a delay inserted on idle qubits. + This is useful for timeline visualizations, but may cause issues for execution + on large backends. + """ + super().__init__(schedule_idle_qubits=schedule_idle_qubits) + self.fill_very_end = fill_very_end + + def _pad( + self, + block_idx: int, + qubit: Qubit, + t_start: int, + t_end: int, + next_node: DAGNode, + prev_node: DAGNode, + ) -> None: + if not self.fill_very_end and isinstance(next_node, DAGOutNode): + return + + time_interval = t_end - t_start + self._apply_scheduled_op(block_idx, t_start, Delay(time_interval, "dt"), qubit) diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py new file mode 100644 index 000000000..b18ee32c6 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/scheduler.py @@ -0,0 +1,643 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Scheduler for dynamic circuit backends.""" + +from abc import abstractmethod +from typing import Dict, List, Optional, Union, Set, Tuple +import itertools + +import qiskit +from qiskit.circuit.parameterexpression import ParameterExpression +from qiskit.converters import circuit_to_dag +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.passes.scheduling.time_unit_conversion import TimeUnitConversion + +from qiskit.circuit import Barrier, Clbit, ControlFlowOp, Measure, Qubit, Reset +from qiskit.circuit.bit import Bit +from qiskit.dagcircuit import DAGCircuit, DAGNode +from qiskit.transpiler.exceptions import TranspilerError + +from .utils import block_order_op_nodes + + +class BaseDynamicCircuitAnalysis(TransformationPass): + """Base class for scheduling analysis + + This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits + backends due to the limitations imposed by hardware. This is expected to evolve over time as the + dynamic circuit backends also change. + + The primary differences are that: + + * Resets and control-flow currently trigger the end of a "quantum block". The period between the end + of the block and the next is *nondeterministic* + ie., we do not know when the next block will begin (as we could be evaluating a classical + function of nondeterministic length) and therefore the + next block starts at a *relative* t=0. + * During a measurement it is possible to apply gates in parallel on disjoint qubits. + * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. + """ + + def __init__( + self, durations: qiskit.transpiler.instruction_durations.InstructionDurations + ) -> None: + """Scheduler for dynamic circuit backends. 
+ + Args: + durations: Durations of instructions to be used in scheduling. + """ + self._durations = durations + + self._dag: Optional[DAGCircuit] = None + self._block_dag: Optional[DAGCircuit] = None + self._wire_map: Optional[Dict[Bit, Bit]] = None + self._node_mapped_wires: Optional[Dict[DAGNode, List[Bit]]] = None + self._node_block_dags: Dict[DAGNode, DAGCircuit] = {} + # Mapping of control-flow nodes to their containing blocks + self._block_idx_dag_map: Dict[int, DAGCircuit] = {} + # Mapping of block indices to the respective DAGCircuit + + self._current_block_idx = 0 + self._max_block_t1: Optional[Dict[int, int]] = None + # Track as we build to avoid extra pass + self._control_flow_block = False + self._node_start_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None + self._node_stop_time: Optional[Dict[DAGNode, Tuple[int, int]]] = None + self._bit_stop_times: Optional[Dict[int, Dict[Union[Qubit, Clbit], int]]] = None + # Dictionary of blocks each containing a dictionary with the key for each bit + # in the block and its value being the final time of the bit within the block. + self._current_block_measures: Set[DAGNode] = set() + self._current_block_measures_has_reset: bool = False + self._node_tied_to: Optional[Dict[DAGNode, Set[DAGNode]]] = None + # Nodes that the scheduling of this node is tied to. + self._bit_indices: Optional[Dict[Qubit, int]] = None + + self._time_unit_converter = TimeUnitConversion(durations) + + super().__init__() + + @property + def _current_block_bit_times(self) -> Dict[Union[Qubit, Clbit], int]: + return self._bit_stop_times[self._current_block_idx] + + def _visit_block(self, block: DAGCircuit, wire_map: Dict[Qubit, Qubit]) -> None: + # Push the previous block dag onto the stack + prev_block_dag = self._block_dag + self._block_dag = block + prev_wire_map, self._wire_map = self._wire_map, wire_map + + # We must run this on the individual block + # as the current implementation does not recurse + # into the circuit structure. 
+ self._time_unit_converter.run(block) + self._begin_new_circuit_block() + + for node in block_order_op_nodes(block): + self._visit_node(node) + + # Final flush + self._flush_measures() + + # Pop the previous block dag off the stack restoring it + self._block_dag = prev_block_dag + self._wire_map = prev_wire_map + + def _visit_node(self, node: DAGNode) -> None: + if isinstance(node.op, ControlFlowOp): + self._visit_control_flow_op(node) + elif node.op.condition_bits: + raise TranspilerError( + "c_if control-flow is not supported by this pass. " + 'Please apply "ConvertConditionsToIfOps" to convert these ' + "conditional operations to new-style Qiskit control-flow." + ) + else: + if isinstance(node.op, Measure): + self._visit_measure(node) + elif isinstance(node.op, Reset): + self._visit_reset(node) + else: + self._visit_generic(node) + + def _visit_control_flow_op(self, node: DAGNode) -> None: + # TODO: This is a hack required to tie nodes of control-flow + # blocks across the scheduler and block_base_padder. This is + # because the current control flow nodes store the block as a + # circuit which is not hashable. For processing we are currently + # required to convert each circuit block to a dag which is inefficient + # and causes node relationships stored in analysis to be lost between + # passes as we are constantly recreating the block dags. + # We resolve this here by caching these dags in the property set. 
+ self._node_block_dags[node] = node_block_dags = [] + + t0 = max( # pylint: disable=invalid-name + self._current_block_bit_times[bit] for bit in self._map_wires(node) + ) + + # Duration is 0 as we do not schedule across terminator + t1 = t0 # pylint: disable=invalid-name + self._update_bit_times(node, t0, t1) + + for block in node.op.blocks: + self._control_flow_block = True + + new_dag = circuit_to_dag(block) + inner_wire_map = { + inner: outer + for outer, inner in zip(self._map_wires(node), new_dag.qubits + new_dag.clbits) + } + node_block_dags.append(new_dag) + self._visit_block(new_dag, inner_wire_map) + + # Begin new block for exit to "then" block. + self._begin_new_circuit_block() + + @abstractmethod + def _visit_measure(self, node: DAGNode) -> None: + raise NotImplementedError + + @abstractmethod + def _visit_reset(self, node: DAGNode) -> None: + raise NotImplementedError + + @abstractmethod + def _visit_generic(self, node: DAGNode) -> None: + raise NotImplementedError + + def _init_run(self, dag: DAGCircuit) -> None: + """Setup for initial run.""" + + self._dag = dag + self._block_dag = None + self._wire_map = {wire: wire for wire in dag.wires} + self._node_mapped_wires = {} + self._node_block_dags = {} + self._block_idx_dag_map = {} + + self._current_block_idx = 0 + self._max_block_t1 = {} + self._control_flow_block = False + + if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None: + raise TranspilerError("ASAP schedule runs on physical circuits only") + + self._node_start_time = {} + self._node_stop_time = {} + self._bit_stop_times = {0: {q: 0 for q in dag.qubits + dag.clbits}} + self._current_block_measures = set() + self._current_block_measures_has_reset = False + self._node_tied_to = {} + self._bit_indices = {q: index for index, q in enumerate(dag.qubits)} + + def _get_duration(self, node: DAGNode, dag: Optional[DAGCircuit] = None) -> int: + if node.op.condition_bits or isinstance(node.op, ControlFlowOp): + # As we cannot currently schedule 
through conditionals model + # as zero duration to avoid padding. + return 0 + + indices = [self._bit_indices[qarg] for qarg in self._map_qubits(node)] + + # Fall back to current block dag if not specified. + dag = dag or self._block_dag + + if dag.has_calibration_for(node): + # If node has calibration, this value should be the highest priority + cal_key = tuple(indices), tuple(float(p) for p in node.op.params) + duration = dag.calibrations[node.op.name][cal_key].duration + node.op.duration = duration + else: + duration = node.op.duration + + if isinstance(duration, ParameterExpression): + raise TranspilerError( + f"Parameterized duration ({duration}) " + f"of {node.op.name} on qubits {indices} is not bounded." + ) + if duration is None: + raise TranspilerError(f"Duration of {node.op.name} on qubits {indices} is not found.") + + return duration + + def _update_bit_times( # pylint: disable=invalid-name + self, node: DAGNode, t0: int, t1: int, update_cargs: bool = True + ) -> None: + self._max_block_t1[self._current_block_idx] = max( + self._max_block_t1.get(self._current_block_idx, 0), t1 + ) + + update_bits = self._map_wires(node) if update_cargs else self._map_qubits(node) + for bit in update_bits: + self._current_block_bit_times[bit] = t1 + + self._node_start_time[node] = (self._current_block_idx, t0) + self._node_stop_time[node] = (self._current_block_idx, t1) + + def _begin_new_circuit_block(self) -> None: + """Create a new timed circuit block completing the previous block.""" + self._current_block_idx += 1 + self._block_idx_dag_map[self._current_block_idx] = self._block_dag + self._control_flow_block = False + self._bit_stop_times[self._current_block_idx] = { + self._wire_map[wire]: 0 for wire in self._block_dag.wires + } + self._flush_measures() + + def _flush_measures(self) -> None: + """Flush currently accumulated measurements by resetting block measures.""" + for node in self._current_block_measures: + self._node_tied_to[node] = 
self._current_block_measures.copy() + + self._current_block_measures = set() + self._current_block_measures_has_reset = False + + def _current_block_measure_qargs(self) -> Set[Qubit]: + return set( + qarg for measure in self._current_block_measures for qarg in self._map_qubits(measure) + ) + + def _check_flush_measures(self, node: DAGNode) -> None: + if self._current_block_measure_qargs() & set(self._map_qubits(node)): + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. + self._begin_new_circuit_block() + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + + def _map_wires(self, node: DAGNode) -> List[Qubit]: + """Map the wires from the current node to the top-level block's wires. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + if node not in self._node_mapped_wires: + self._node_mapped_wires[node] = wire_map = [ + self._wire_map[q] for q in node.qargs + node.cargs + ] + return wire_map + + return self._node_mapped_wires[node] + + def _map_qubits(self, node: DAGNode) -> List[Qubit]: + """Map the qubits from the current node to the top-level block's qubits. + + TODO: We should have an easier approach to wire mapping from the transpiler. + """ + return [wire for wire in self._map_wires(node) if isinstance(wire, Qubit)] + + +class ASAPScheduleAnalysis(BaseDynamicCircuitAnalysis): + """Dynamic circuits as-soon-as-possible (ASAP) scheduling analysis pass. + + This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits + backends due to the limitations imposed by hardware. This is expected to evolve over time as the + dynamic circuit backends also change. + + In its current form this is similar to Qiskit's ASAP scheduler in which instructions + start as early as possible. + + The primary differences are that: + + * Resets and control-flow currently trigger the end of a "quantum block". 
The period between the end + of the block and the next is *nondeterministic* + ie., we do not know when the next block will begin (as we could be evaluating a classical + function of nondeterministic length) and therefore the + next block starts at a *relative* t=0. + * During a measurement it is possible to apply gates in parallel on disjoint qubits. + * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. + """ + + def run(self, dag: DAGCircuit) -> DAGCircuit: + """Run the ASAPSchedule pass on `dag`. + Args: + dag (DAGCircuit): DAG to schedule. + Raises: + TranspilerError: if the circuit is not mapped on physical qubits. + TranspilerError: if conditional bit is added to non-supported instruction. + Returns: + The scheduled DAGCircuit. + """ + self._init_run(dag) + + # Trivial wire map at the top-level + wire_map = {wire: wire for wire in dag.wires} + # Top-level dag is the entry block + self._visit_block(dag, wire_map) + + self.property_set["node_start_time"] = self._node_start_time + self.property_set["node_block_dags"] = self._node_block_dags + return dag + + def _visit_measure(self, node: DAGNode) -> None: + """Visit a measurement node. + + Measurement currently triggers the end of a deterministically scheduled block + of instructions in IBM dynamic circuits hardware. + This means that it is possible to schedule *up to* a measurement (and during its pulses) + but the measurement will be followed by a period of indeterminism. + All measurements on disjoint qubits that topologically follow another + measurement will be collected and performed in parallel. A measurement on a qubit + intersecting with the set of qubits to be measured in parallel will trigger the + end of a scheduling block with said measurement occurring in a following block + which begins another grouping sequence.
This behavior will change in future + backend software updates.""" + + current_block_measure_qargs = self._current_block_measure_qargs() + # We handle a set of qubits here as _visit_reset currently calls + # this method and a reset may have multiple qubits. + measure_qargs = set(self._map_qubits(node)) + + t0q = max( + self._current_block_bit_times[q] for q in measure_qargs + ) # pylint: disable=invalid-name + + # If the measurement qubits overlap, we need to flush measurements and start a + # new scheduling block. + if current_block_measure_qargs & measure_qargs: + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. + self._begin_new_circuit_block() + t0q = 0 + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + else: + # Otherwise we need to increment all measurements to start at the same time within the block. + t0q = max( # pylint: disable=invalid-name + itertools.chain( + [t0q], + (self._node_start_time[measure][1] for measure in self._current_block_measures), + ) + ) + + # Insert this measure into the block + self._current_block_measures.add(node) + + for measure in self._current_block_measures: + t0 = t0q # pylint: disable=invalid-name + bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} + measure_duration = self._durations.get( + Measure(), + [bit_indices[qarg] for qarg in self._map_qubits(measure)], + unit="dt", + ) + t1 = t0 + measure_duration # pylint: disable=invalid-name + self._update_bit_times(measure, t0, t1) + + def _visit_reset(self, node: DAGNode) -> None: + """Visit a reset node. + + Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware + as conditional reset is performed internally using a c_if. This means that it is + possible to schedule *up to* a reset (and during its measurement pulses) + but the reset will be followed by a period of conditional indeterminism. 
+ All resets on disjoint qubits will be collected on the same qubits to be run simultaneously. + """ + # Process as measurement + self._current_block_measures_has_reset = True + self._visit_measure(node) + # Then set that we are now a conditional node. + self._control_flow_block = True + + def _visit_generic(self, node: DAGNode) -> None: + """Visit a generic node such as a gate or barrier.""" + op_duration = self._get_duration(node) + + # If the measurement qubits overlap, we need to flush the measurement group + self._check_flush_measures(node) + + t0 = max( # pylint: disable=invalid-name + self._current_block_bit_times[bit] for bit in self._map_wires(node) + ) + + t1 = t0 + op_duration # pylint: disable=invalid-name + self._update_bit_times(node, t0, t1) + + +class ALAPScheduleAnalysis(BaseDynamicCircuitAnalysis): + """Dynamic circuits as-late-as-possible (ALAP) scheduling analysis pass. + + This is a scheduler designed to work for the unique scheduling constraints of the dynamic circuits + backends due to the limitations imposed by hardware. This is expected to evolve over time as the + dynamic circuit backends also change. + + In its current form this is similar to Qiskit's ALAP scheduler in which instructions + start as late as possible. + + The primary differences are that: + + * Resets and control-flow currently trigger the end of a "quantum block". The period between the end + of the block and the next is *nondeterministic* + ie., we do not know when the next block will begin (as we could be evaluating a classical + function of nondeterministic length) and therefore the + next block starts at a *relative* t=0. + * During a measurement it is possible to apply gates in parallel on disjoint qubits. + * Measurements and resets on disjoint qubits happen simultaneously and are part of the same block. + """ + + def run(self, dag: DAGCircuit) -> DAGCircuit: + """Run the ALAPSchedule pass on `dag`. + Args: + dag (DAGCircuit): DAG to schedule.
+ Raises: + TranspilerError: if the circuit is not mapped on physical qubits. + TranspilerError: if conditional bit is added to non-supported instruction. + Returns: + The scheduled DAGCircuit. + """ + self._init_run(dag) + + # Trivial wire map at the top-level + wire_map = {wire: wire for wire in dag.wires} + # Top-level dag is the entry block + self._visit_block(dag, wire_map) + self._push_block_durations() + self.property_set["node_start_time"] = self._node_start_time + self.property_set["node_block_dags"] = self._node_block_dags + return dag + + def _visit_measure(self, node: DAGNode) -> None: + """Visit a measurement node. + + Measurement currently triggers the end of a deterministically scheduled block + of instructions in IBM dynamic circuits hardware. + This means that it is possible to schedule *up to* a measurement (and during its pulses) + but the measurement will be followed by a period of indeterminism. + All measurements on disjoint qubits that topologically follow another + measurement will be collected and performed in parallel. A measurement on a qubit + intersecting with the set of qubits to be measured in parallel will trigger the + end of a scheduling block with said measurement occurring in a following block + which begins another grouping sequence. This behavior will change in future + backend software updates.""" + + current_block_measure_qargs = self._current_block_measure_qargs() + # We handle a set of qubits here as _visit_reset currently calls + # this method and a reset may have multiple qubits. + measure_qargs = set(self._map_qubits(node)) + + t0q = max( + self._current_block_bit_times[q] for q in measure_qargs + ) # pylint: disable=invalid-name + + # If the measurement qubits overlap, we need to flush measurements and start a + # new scheduling block. + if current_block_measure_qargs & measure_qargs: + if self._current_block_measures_has_reset: + # If a reset is included we must trigger the end of a block. 
+ self._begin_new_circuit_block() + t0q = 0 + else: + # Otherwise just trigger a measurement flush + self._flush_measures() + else: + # Otherwise we need to increment all measurements to start at the same time within the block. + t0q = max( # pylint: disable=invalid-name + itertools.chain( + [t0q], + (self._node_start_time[measure][1] for measure in self._current_block_measures), + ) + ) + + # Insert this measure into the block + self._current_block_measures.add(node) + + for measure in self._current_block_measures: + t0 = t0q # pylint: disable=invalid-name + bit_indices = {bit: index for index, bit in enumerate(self._block_dag.qubits)} + measure_duration = self._durations.get( + Measure(), + [bit_indices[qarg] for qarg in self._map_qubits(measure)], + unit="dt", + ) + t1 = t0 + measure_duration # pylint: disable=invalid-name + self._update_bit_times(measure, t0, t1) + + def _visit_reset(self, node: DAGNode) -> None: + """Visit a reset node. + + Reset currently triggers the end of a pulse block in IBM dynamic circuits hardware + as conditional reset is performed internally using a c_if. This means that it is + possible to schedule *up to* a reset (and during its measurement pulses) + but the reset will be followed by a period of conditional indeterminism. + All resets on disjoint qubits will be collected on the same qubits to be run simultaneously. + """ + # Process as measurement + self._current_block_measures_has_reset = True + self._visit_measure(node) + # Then set that we are now a conditional node. + self._control_flow_block = True + + def _visit_generic(self, node: DAGNode) -> None: + """Visit a generic node such as a gate or barrier.""" + + # If True we are coming from a conditional block. + # start a new block for the unconditional operations. 
+ if self._control_flow_block: + self._begin_new_circuit_block() + + op_duration = self._get_duration(node) + + # If the measurement qubits overlap, we need to flush the measurement group + self._check_flush_measures(node) + + t0 = max( # pylint: disable=invalid-name + self._current_block_bit_times[bit] for bit in self._map_wires(node) + ) + + t1 = t0 + op_duration # pylint: disable=invalid-name + self._update_bit_times(node, t0, t1) + + def _push_block_durations(self) -> None: + """After scheduling of each block, pass over and push the times of all nodes.""" + + # Store the next available time to push to for the block by bit + block_bit_times = {} + # Iterated nodes starting at the first, from the node with the + # last time, preferring barriers over non-barriers + + def order_ops(item: Tuple[DAGNode, Tuple[int, int]]) -> Tuple[int, int, bool, int]: + """Iterated nodes ordering by channel, time and preferring that barriers are processed + first.""" + return ( + item[1][0], + -item[1][1], + not isinstance(item[0].op, Barrier), + self._get_duration(item[0], dag=self._block_idx_dag_map[item[1][0]]), + ) + + iterate_nodes = sorted(self._node_stop_time.items(), key=order_ops) + + new_node_start_time = {} + new_node_stop_time = {} + + def _calculate_new_times( + block: int, node: DAGNode, block_bit_times: Dict[int, Dict[Qubit, int]] + ) -> int: + max_block_time = min(block_bit_times[block][bit] for bit in self._map_qubits(node)) + + t0 = self._node_start_time[node][1] # pylint: disable=invalid-name + t1 = self._node_stop_time[node][1] # pylint: disable=invalid-name + # Determine how much to shift by + node_offset = max_block_time - t1 + new_t0 = t0 + node_offset + return new_t0 + + scheduled = set() + + def _update_time( + block: int, + node: DAGNode, + new_time: int, + block_bit_times: Dict[int, Dict[Qubit, int]], + ) -> None: + scheduled.add(node) + + new_node_start_time[node] = (block, new_time) + new_node_stop_time[node] = ( + block, + new_time + 
self._get_duration(node, dag=self._block_idx_dag_map[block]), + ) + + # Update available times by bit + for bit in self._map_qubits(node): + block_bit_times[block][bit] = new_time + + for node, ( + block, + _, + ) in iterate_nodes: # pylint: disable=invalid-name + # skip already scheduled + if node in scheduled: + continue + # Start with last time as the time to push to + if block not in block_bit_times: + block_bit_times[block] = {q: self._max_block_t1[block] for q in self._dag.wires} + + # Calculate the latest available time to push to collectively for tied nodes + tied_nodes = self._node_tied_to.get(node, None) + if tied_nodes is not None: + # Take the minimum time that will be schedulable + # self._node_tied_to includes the node itself. + new_times = [ + _calculate_new_times(block, tied_node, block_bit_times) + for tied_node in self._node_tied_to[node] + ] + new_time = min(new_times) + for tied_node in tied_nodes: + _update_time(block, tied_node, new_time, block_bit_times) + + else: + new_t0 = _calculate_new_times(block, node, block_bit_times) + _update_time(block, node, new_t0, block_bit_times) + + self._node_start_time = new_node_start_time + self._node_stop_time = new_node_stop_time diff --git a/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py new file mode 100644 index 000000000..bf7665cd1 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/passes/scheduling/utils.py @@ -0,0 +1,287 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Utility functions for scheduling passes.""" + +import warnings +from typing import List, Generator, Optional, Tuple, Union + +from qiskit.circuit import ControlFlowOp, Measure, Reset, Parameter +from qiskit.dagcircuit import DAGCircuit, DAGOpNode +from qiskit.transpiler.instruction_durations import ( + InstructionDurations, + InstructionDurationsType, +) +from qiskit.transpiler.exceptions import TranspilerError + + +def block_order_op_nodes(dag: DAGCircuit) -> Generator[DAGOpNode, None, None]: + """Yield nodes such that they are sorted into groups of blocks that minimize synchronization. + + Measurements are also grouped. + """ + + def _is_grouped_measure(node: DAGOpNode) -> bool: + """Does this node need to be grouped?""" + return isinstance(node.op, (Reset, Measure)) + + def _is_block_trigger(node: DAGOpNode) -> bool: + """Does this node trigger the end of a block?""" + return isinstance(node.op, ControlFlowOp) + + def _emit( + node: DAGOpNode, + grouped_measure: List[DAGOpNode], + block_triggers: List[DAGOpNode], + ) -> bool: + """Should we emit this node?""" + for measure in grouped_measure: + if dag.is_predecessor(node, measure): + return True + for block_trigger in block_triggers: + if dag.is_predecessor(node, block_trigger): + return True + + return _is_grouped_measure(node) or _is_block_trigger(node) + + # Begin processing nodes in order + next_nodes = dag.topological_op_nodes() + while next_nodes: + curr_nodes = next_nodes # Setup the next iteration nodes + next_nodes_set = set() # Nodes that will make it into the next iteration + next_nodes = [] # Nodes to process in order in the next iteration + to_push = [] # Do we push this to the very last block? + yield_measures = [] # Measures/resets we will yield first + yield_block_triggers = [] # Followed by block triggers (conditionals) + block_break = False # Did we encounter a block trigger in this iteration? 
+ for node in curr_nodes: + # If we have added this node to the next set of nodes + # skip for now. + if node in next_nodes_set: + next_nodes.append(node) + continue + + # If this nodes is a measurement + # push on the measurements to process + if _is_grouped_measure(node): + block_break = True + node_descendants = dag.descendants(node) + next_nodes_set |= set(node_descendants) + yield_measures.append(node) + # If this node is a block push this onto + # the block trigger list. + elif _is_block_trigger(node): + block_break = True + node_descendants = dag.descendants(node) + next_nodes_set |= set(node_descendants) + yield_block_triggers.append(node) + # Otherwise we push onto the final list of blocks to emit + # as part of the final block. + else: + to_push.append(node) + + new_to_push = [] + for node in to_push: + node_descendants = dag.descendants(node) + if any( + _emit(descendant, yield_measures, yield_block_triggers) + for descendant in node_descendants + if isinstance(descendant, DAGOpNode) + ): + yield node + else: + new_to_push.append(node) + + to_push = new_to_push + + # First emit the measurements which will feed + for node in yield_measures: + yield node + # Into the block triggers we will emit. + for node in yield_block_triggers: + yield node + + # We're at the last block and emit the final nodes + if not block_break: + for node in to_push: + yield node + break + # Otherwise emit the final nodes + # Add to the front of the list to be processed next + to_push.extend(next_nodes) + next_nodes = to_push + + +InstrKey = Union[ + Tuple[str, None, None], + Tuple[str, Tuple[int], None], + Tuple[str, Tuple[int], Tuple[Parameter]], +] + + +class DynamicCircuitInstructionDurations(InstructionDurations): + """For dynamic circuits the IBM Qiskit backend currently + reports instruction durations that differ compared with those + required for the legacy Qobj-based path. For now we use this + class to report updated InstructionDurations. 
+ TODO: This would be mitigated by a specialized Backend/Target for + dynamic circuit backends. + """ + + MEASURE_PATCH_CYCLES = 160 + MEASURE_PATCH_ODD_OFFSET = 64 + + def __init__( + self, + instruction_durations: Optional[InstructionDurationsType] = None, + dt: float = None, + enable_patching: bool = True, + ): + """Dynamic circuit instruction durations.""" + self._enable_patching = enable_patching + super().__init__(instruction_durations=instruction_durations, dt=dt) + + def update( + self, inst_durations: Optional[InstructionDurationsType], dt: float = None + ) -> "DynamicCircuitInstructionDurations": + """Update self with inst_durations (inst_durations overwrite self). Overrides the default + durations for certain hardcoded instructions. + + Args: + inst_durations: Instruction durations to be merged into self (overwriting self). + dt: Sampling duration in seconds of the target backend. + + Returns: + InstructionDurations: The updated InstructionDurations. + + Raises: + TranspilerError: If the format of instruction_durations is invalid. + """ + + # First update as normal + super().update(inst_durations, dt=dt) + + if not self._enable_patching or inst_durations is None: + return self + + # Then update required instructions. This code is ugly + # because the InstructionDurations code is handling too many + # formats in update and this code must also. 
+ if isinstance(inst_durations, InstructionDurations): + for key in inst_durations.keys(): + self._patch_instruction(key) + else: + for name, qubits, _, parameters, _ in inst_durations: + if isinstance(qubits, int): + qubits = [qubits] + + if isinstance(parameters, (int, float)): + parameters = [parameters] + + if qubits is None: + key = (name, None, None) + elif parameters is None: + key = (name, tuple(qubits), None) + else: + key = (name, tuple(qubits), tuple(parameters)) + + self._patch_instruction(key) + + return self + + def _patch_instruction(self, key: InstrKey) -> None: + """Dispatcher logic for instruction patches""" + name = key[0] + if name == "measure": + self._patch_measurement(key) + elif name == "reset": + self._patch_reset(key) + + def _patch_measurement(self, key: InstrKey) -> None: + """Patch measurement duration by extending duration by 160dt as temporarily + required by the dynamic circuit backend. + """ + prev_duration, unit = self._get_duration_dt(key) + if unit != "dt": + raise TranspilerError('Can currently only patch durations of "dt".') + odd_cycle_correction = self._get_odd_cycle_correction() + self._patch_key(key, prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, unit) + # Enforce patching of reset on measurement update + self._patch_reset(("reset", key[1], key[2])) + + def _patch_reset(self, key: InstrKey) -> None: + """Patch reset duration by extending duration by measurement patch as temporarily + required by the dynamic circuit backend. 
+ """ + # We patch the reset to be the duration of the measurement if it + # is available as it currently + # triggers the end of scheduling after the measurement pulse + measure_key = ("measure", key[1], key[2]) + try: + measure_duration, unit = self._get_duration_dt(measure_key) + self._patch_key(key, measure_duration, unit) + except KeyError: + # Fall back to reset key if measure not available + prev_duration, unit = self._get_duration_dt(key) + if unit != "dt": + raise TranspilerError('Can currently only patch durations of "dt".') + odd_cycle_correction = self._get_odd_cycle_correction() + self._patch_key( + key, + prev_duration + self.MEASURE_PATCH_CYCLES + odd_cycle_correction, + unit, + ) + + def _get_duration_dt(self, key: InstrKey) -> Tuple[int, str]: + """Handling for the complicated structure of this class. + + TODO: This class implementation should be simplified in Qiskit. Too many edge cases. + """ + if key[1] is None and key[2] is None: + return self.duration_by_name[key[0]] + elif key[2] is None: + return self.duration_by_name_qubits[(key[0], key[1])] + + return self.duration_by_name_qubits_params[key] + + def _patch_key(self, key: InstrKey, duration: int, unit: str) -> None: + """Handling for the complicated structure of this class. + + TODO: This class implementation should be simplified in Qiskit. Too many edge cases. 
+ """ + if key[1] is None and key[2] is None: + self.duration_by_name[key[0]] = (duration, unit) + elif key[2] is None: + self.duration_by_name_qubits[(key[0], key[1])] = (duration, unit) + + self.duration_by_name_qubits_params[key] = (duration, unit) + + def _get_odd_cycle_correction(self) -> int: + """Determine the amount of the odd cycle correction to apply + For devices with short gates with odd lenghts we add an extra 16dt to the measurement + + TODO: Eliminate the need for this correction + """ + key_pulse = "sx" + key_qubit = 0 + try: + key_duration = self.get(key_pulse, key_qubit, "dt") + except TranspilerError: + warnings.warn( + f"No {key_pulse} gate found for {key_qubit} for detection of " + "short odd gate lengths, default measurement timing will be used." + ) + key_duration = 160 # keyPulse gate not found + + if key_duration < 160 and key_duration % 32: + return self.MEASURE_PATCH_ODD_OFFSET + return 0 diff --git a/qiskit_ibm_runtime/transpiler/plugin.py b/qiskit_ibm_runtime/transpiler/plugin.py new file mode 100644 index 000000000..75f70cfe4 --- /dev/null +++ b/qiskit_ibm_runtime/transpiler/plugin.py @@ -0,0 +1,98 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2022. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Plugin for IBM provider backend transpiler stages.""" + +from typing import Optional + +from qiskit.transpiler.passmanager import PassManager +from qiskit.transpiler.passmanager_config import PassManagerConfig +from qiskit.transpiler.preset_passmanagers.plugin import PassManagerStagePlugin +from qiskit.transpiler.preset_passmanagers import common +from qiskit.transpiler.passes import ConvertConditionsToIfOps + +from qiskit_ibm_provider.transpiler.passes.basis.convert_id_to_delay import ( + ConvertIdToDelay, +) + + +class IBMTranslationPlugin(PassManagerStagePlugin): + """A translation stage plugin for targeting Qiskit circuits + to IBM Quantum systems.""" + + def pass_manager( + self, + pass_manager_config: PassManagerConfig, + optimization_level: Optional[int] = None, + ) -> PassManager: + """Build IBMTranslationPlugin PassManager.""" + + translator_pm = common.generate_translation_passmanager( + target=pass_manager_config.target, + basis_gates=pass_manager_config.basis_gates, + approximation_degree=pass_manager_config.approximation_degree, + coupling_map=pass_manager_config.coupling_map, + backend_props=pass_manager_config.backend_properties, + unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, + unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, + hls_config=pass_manager_config.hls_config, + ) + + plugin_passes = [] + instruction_durations = pass_manager_config.instruction_durations + if instruction_durations: + plugin_passes.append(ConvertIdToDelay(instruction_durations)) + + return PassManager(plugin_passes) + translator_pm + + +class IBMDynamicTranslationPlugin(PassManagerStagePlugin): + """A translation stage plugin for targeting Qiskit circuits + to IBM Quantum systems.""" + + def pass_manager( + self, + pass_manager_config: PassManagerConfig, + optimization_level: Optional[int] = None, + ) -> PassManager: + """Build IBMTranslationPlugin PassManager.""" + + translator_pm = 
common.generate_translation_passmanager( + target=pass_manager_config.target, + basis_gates=pass_manager_config.basis_gates, + approximation_degree=pass_manager_config.approximation_degree, + coupling_map=pass_manager_config.coupling_map, + backend_props=pass_manager_config.backend_properties, + unitary_synthesis_method=pass_manager_config.unitary_synthesis_method, + unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config, + hls_config=pass_manager_config.hls_config, + ) + + instruction_durations = pass_manager_config.instruction_durations + plugin_passes = [] + if pass_manager_config.target is not None: + id_supported = "id" in pass_manager_config.target + else: + id_supported = "id" in pass_manager_config.basis_gates + + if instruction_durations and not id_supported: + plugin_passes.append(ConvertIdToDelay(instruction_durations)) + + # Only inject control-flow conversion pass at level 0 and level 1. As of + # qiskit 0.22.x transpile() with level 2 and 3 does not support + # control flow instructions (including if_else). This can be + # removed when higher optimization levels support control flow + # instructions. 
+ if optimization_level in {0, 1}: + plugin_passes += [ConvertConditionsToIfOps()] + + return PassManager(plugin_passes) + translator_pm diff --git a/qiskit_ibm_runtime/utils/utils.py b/qiskit_ibm_runtime/utils/utils.py index 271ee515b..83f2aeb42 100644 --- a/qiskit_ibm_runtime/utils/utils.py +++ b/qiskit_ibm_runtime/utils/utils.py @@ -19,7 +19,7 @@ import hashlib from queue import Queue from threading import Condition -from typing import List, Optional, Any, Dict, Union, Tuple, Type +from typing import List, Optional, Any, Dict, Union, Tuple from urllib.parse import urlparse import requests @@ -29,20 +29,19 @@ from ibm_platform_services import ResourceControllerV2 # pylint: disable=import-error -def validate_job_tags(job_tags: Optional[List[str]], exception: Type[Exception]) -> None: +def validate_job_tags(job_tags: Optional[List[str]]) -> None: """Validates input job tags. Args: job_tags: Job tags to be validated. - exception: Exception to raise if the tags are invalid. Raises: - Exception: If the job tags are invalid. + ValueError: If the job tags are invalid. """ if job_tags and ( not isinstance(job_tags, list) or not all(isinstance(tag, str) for tag in job_tags) ): - raise exception("job_tags needs to be a list of strings.") + raise ValueError("job_tags needs to be a list of strings.") def get_iam_api_url(cloud_url: str) -> str: diff --git a/releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml b/releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml new file mode 100644 index 000000000..5322a9b7e --- /dev/null +++ b/releasenotes/notes/backend_run-d5a92a4d677da6c1.yaml @@ -0,0 +1,6 @@ +--- + +features: + - | + Added support for ``backend.run()``. The functionality is similar to that in ``qiskit-ibm-provider``. 
+ diff --git a/setup.py b/setup.py index cd35c8e70..ce4e52f10 100644 --- a/setup.py +++ b/setup.py @@ -77,4 +77,10 @@ "Documentation": "https://qiskit.org/documentation/", "Source Code": "https://github.com/Qiskit/qiskit-ibm-runtime", }, + entry_points={ + "qiskit.transpiler.translation": [ + "ibm_backend = qiskit_ibm_runtime.transpiler.plugin:IBMTranslationPlugin", + "ibm_dynamic_circuits = qiskit_ibm_runtime.transpiler.plugin:IBMDynamicTranslationPlugin", + ] + }, ) diff --git a/test/decorators.py b/test/decorators.py index be43830ca..007b7944e 100644 --- a/test/decorators.py +++ b/test/decorators.py @@ -156,3 +156,50 @@ class IntegrationTestDependencies: token: str channel: str url: str + + +def integration_test_setup_with_backend( + backend_name: Optional[str] = None, + simulator: Optional[bool] = True, + min_num_qubits: Optional[int] = None, + staging: Optional[bool] = True, +) -> Callable: + """Returns a decorator that retrieves the appropriate backend to use for testing. + + Either retrieves the backend via its name (if specified), or selects the least busy backend that + matches all given filter criteria. + + Args: + backend_name: The name of the backend. + simulator: If set to True, the list of suitable backends is limited to simulators. + min_num_qubits: Minimum number of qubits the backend has to have. + + Returns: + Decorator that retrieves the appropriate backend to use for testing. 
+ """ + + def _decorator(func): + @wraps(func) + @integration_test_setup() + def _wrapper(self, *args, **kwargs): + dependencies: IntegrationTestDependencies = kwargs["dependencies"] + service = dependencies.service + if not staging: + raise SkipTest("Tests not supported on staging.") + if backend_name: + _backend = service.backend(name=backend_name) + else: + _backend = service.least_busy( + min_num_qubits=min_num_qubits, + simulator=simulator, + ) + if not _backend: + # pylint: disable=broad-exception-raised + raise Exception("Unable to find a suitable backend.") + + kwargs["backend"] = _backend + func(self, *args, **kwargs) + + return _wrapper + + return _decorator diff --git a/test/fake_account_client.py b/test/fake_account_client.py new file mode 100644 index 000000000..5a2ec9654 --- /dev/null +++ b/test/fake_account_client.py @@ -0,0 +1,531 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Fake AccountClient.""" + +import copy + +# TODO This can probably be merged with the one in test_ibm_job_states +import time +import uuid +import warnings +from concurrent.futures import ThreadPoolExecutor, wait +from datetime import timedelta, datetime +from random import randrange +from typing import Dict, Any + +from qiskit.providers.fake_provider.backends.poughkeepsie.fake_poughkeepsie import ( + FakePoughkeepsie, +) + +from qiskit_ibm_provider.api.exceptions import ( + RequestsApiError, + UserTimeoutExceededError, +) +from qiskit_ibm_provider.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES + +VALID_RESULT_RESPONSE = { + "backend_name": "ibmqx2", + "backend_version": "1.1.1", + "job_id": "XC1323XG2", + "qobj_id": "Experiment1", + "success": True, + "results": [], +} +"""A valid job result response.""" + +VALID_RESULT = { + "header": { + "name": "Bell state", + "creg_sizes": [["c", 2]], + "clbit_labels": [["c", 0], ["c", 1]], + "qubit_labels": [["q", 0], ["q", 1]], + }, + "shots": 1024, + "status": "DONE", + "success": True, + "data": {"counts": {"0x0": 484, "0x3": 540}}, +} + +API_STATUS_TO_INT = { + ApiJobStatus.CREATING: 0, + ApiJobStatus.VALIDATING: 1, + ApiJobStatus.QUEUED: 2, + ApiJobStatus.RUNNING: 3, + ApiJobStatus.COMPLETED: 4, + ApiJobStatus.ERROR_RUNNING_JOB: 4, + ApiJobStatus.ERROR_VALIDATING_JOB: 4, + ApiJobStatus.CANCELLED: 4, +} + + +class BaseFakeJob: + """Base class for faking a remote job.""" + + _job_progress = [ + ApiJobStatus.CREATING, + ApiJobStatus.VALIDATING, + ApiJobStatus.QUEUED, + ApiJobStatus.RUNNING, + ApiJobStatus.COMPLETED, + ] + + def __init__( + self, + executor, + job_id, + qobj, + backend_name, + job_tags=None, + job_name=None, + experiment_id=None, + run_mode=None, + progress_time=0.5, + **kwargs, + ): + """Initialize a fake job.""" + self._job_id = job_id + self._status = ApiJobStatus.CREATING + self.qobj = qobj + self._result = None + self._backend_name = backend_name + self._job_tags = job_tags + self._job_name 
= job_name + self._experiment_id = experiment_id + self._creation_date = datetime.now() + self._run_mode = run_mode + self._queue_pos = kwargs.pop("queue_pos", "auto") + self._comp_time = kwargs.pop("est_completion", "auto") + self._queue_info = None + self._progress_time = progress_time + self._future = executor.submit(self._auto_progress) + + def _auto_progress(self): + """Automatically update job status.""" + for status in self._job_progress: + time.sleep(self._progress_time) + self._status = status + + if self._status == ApiJobStatus.COMPLETED: + self._save_result() + elif self._status == ApiJobStatus.ERROR_RUNNING_JOB: + self._save_bad_result() + + def _save_result(self): + new_result = copy.deepcopy(VALID_RESULT_RESPONSE) + for _ in range(len(self.qobj["experiments"])): + valid_result = copy.deepcopy(VALID_RESULT) + counts = randrange(1024) + valid_result["data"]["counts"] = {"0x0": counts, "0x3": 1024 - counts} + new_result["results"].append(valid_result) + new_result["job_id"] = self._job_id + new_result["backend_name"] = self._backend_name + self._result = new_result + + def _save_bad_result(self): + new_result = copy.deepcopy(VALID_RESULT_RESPONSE) + new_result["job_id"] = self._job_id + new_result["backend_name"] = self._backend_name + new_result["success"] = False + new_result["error"] = {"message": "Kaboom", "code": 1234} + self._result = new_result + + def data(self): + """Return job data.""" + status = self._status + data = { + "job_id": self._job_id, + "kind": "q-object", + "status": status.value, + "creation_date": self._creation_date.isoformat(), + "_backend_info": {"name": self._backend_name}, + "client_info": {"qiskit": "0.23.5"}, + } + if self._job_tags: + data["tags"] = self._job_tags.copy() + if self._job_name: + data["name"] = self._job_name + if self._experiment_id: + data["experiment_id"] = self._experiment_id + if status == ApiJobStatus.ERROR_VALIDATING_JOB: + data["error"] = {"message": "Validation failed.", "code": 1234} + if status in 
[ApiJobStatus.RUNNING] + list(API_JOB_FINAL_STATES) and self._run_mode: + data["run_mode"] = self._run_mode + + time_per_step = {} + timestamp = self._creation_date + for api_stat in API_STATUS_TO_INT: # pylint: disable=consider-using-dict-items + if API_STATUS_TO_INT[status] > API_STATUS_TO_INT[api_stat]: + time_per_step[api_stat.value] = timestamp.isoformat() + timestamp += timedelta(seconds=30) + elif status == api_stat: + time_per_step[api_stat.value] = timestamp.isoformat() + timestamp += timedelta(seconds=30) + data["time_per_step"] = time_per_step + + return data + + def _get_info_queue(self): + self._queue_info = { + "status": "PENDING_IN_QUEUE", + "position": randrange(1, 10) if self._queue_pos == "auto" else self._queue_pos, + } + if self._queue_info["position"] is None: + return self._queue_info + + est_comp_ts = ( + self._creation_date + timedelta(minutes=10 * self._queue_info["position"]) + if self._comp_time == "auto" + else self._comp_time + ) + if est_comp_ts is None: + return self._queue_info + + self._queue_info["estimated_complete_time"] = est_comp_ts.isoformat() + self._queue_info["estimated_start_time"] = (est_comp_ts - timedelta(minutes=20)).isoformat() + + return self._queue_info + + def cancel(self): + """Cancel the job.""" + self._future.cancel() + wait([self._future]) + self._status = ApiJobStatus.CANCELLED + self._result = None + + def result(self): + """Return job result.""" + if not self._result: + raise RequestsApiError("Result is not available") + return self._result + + def status_data(self): + """Return job status data, including queue info.""" + status = self._status + data = {"status": status.value} + if status == ApiJobStatus.QUEUED: + data["info_queue"] = self._get_info_queue() + return data + + def status(self): + """Return job status.""" + return self._status + + def name(self): + """Return job name.""" + return self._job_name + + +class CancelableFakeJob(BaseFakeJob): + """Fake job that can be canceled.""" + + _job_progress = 
[ + ApiJobStatus.CREATING, + ApiJobStatus.VALIDATING, + ApiJobStatus.RUNNING, + ] + + +class NewFieldFakeJob(BaseFakeJob): + """Fake job that contains additional fields.""" + + def data(self): + """Return job data.""" + data = super().data() + data["new_field"] = "foo" + return data + + +class MissingFieldFakeJob(BaseFakeJob): + """Fake job that does not contain required fields.""" + + def data(self): + """Return job data.""" + data = super().data() + del data["job_id"] + return data + + +class FailedFakeJob(BaseFakeJob): + """Fake job that fails.""" + + _job_progress = [ApiJobStatus.CREATING, ApiJobStatus.VALIDATING] + + def __init__(self, *args, **kwargs): + # failure_type can be "validation", "result", or "partial" + self._failure_type = kwargs.pop("failure_type", "validation") + self._job_progress = FailedFakeJob._job_progress.copy() + if self._failure_type == "validation": + self._job_progress.append(ApiJobStatus.ERROR_VALIDATING_JOB) + else: + self._job_progress.extend([ApiJobStatus.RUNNING, ApiJobStatus.ERROR_RUNNING_JOB]) + super().__init__(*args, **kwargs) + + def _save_bad_result(self): + if self._failure_type != "partial": + super()._save_bad_result() + return + new_result = copy.deepcopy(VALID_RESULT_RESPONSE) + new_result["job_id"] = self._job_id + new_result["backend_name"] = self._backend_name + new_result["success"] = False + # Good first result. + valid_result = copy.deepcopy(VALID_RESULT) + counts = randrange(1024) + valid_result["data"]["counts"] = {"0x0": counts, "0x3": 1024 - counts} + new_result["results"].append(valid_result) + + for _ in range(1, len(self.qobj["experiments"])): + valid_result = copy.deepcopy(VALID_RESULT) + valid_result["success"] = False + valid_result["status"] = "This circuit failed." 
+ new_result["results"].append(valid_result) + self._result = new_result + + +class FixedStatusFakeJob(BaseFakeJob): + """Fake job that stays in a specific status.""" + + def __init__(self, *args, **kwargs): + self._fixed_status = kwargs.pop("fixed_status") + super().__init__(*args, **kwargs) + + def _auto_progress(self): + """Automatically update job status.""" + for status in self._job_progress: + time.sleep(0.5) + self._status = status + if status == self._fixed_status: + break + + if self._status == ApiJobStatus.COMPLETED: + self._save_result() + + +class BaseFakeAccountClient: + """Base class for faking the AccountClient.""" + + def __init__( + self, + job_limit=-1, + job_class=BaseFakeJob, + job_kwargs=None, + props_count=None, + queue_positions=None, + est_completion=None, + run_mode=None, + ): + """Initialize a fake account client.""" + self._jobs = {} + self._results_retrieved = set() + self._job_limit = job_limit + self._executor = ThreadPoolExecutor() + self._job_class = job_class + if isinstance(self._job_class, list): + self._job_class.reverse() + self._job_kwargs = job_kwargs or {} + self._props_count = props_count or 0 + self._props_date = datetime.now().isoformat() + self._queue_positions = queue_positions.copy() if queue_positions else [] + self._queue_positions.reverse() + self._est_completion = est_completion.copy() if est_completion else [] + self._est_completion.reverse() + self._run_mode = run_mode + self._default_job_class = BaseFakeJob + + def list_jobs(self, limit, skip, descending=True, extra_filter=None): + """Return a list of jobs.""" + # pylint: disable=unused-argument + extra_filter = extra_filter or {} + if all(fil in extra_filter for fil in ["creationDate", "id"]): + return {} + tag = extra_filter.get("tags", None) + all_job_data = [] + for job in list(self._jobs.values())[skip : skip + limit]: + job_data = job.data() + if tag is None or tag in job_data["tags"]: + all_job_data.append(job_data) + if not descending: + 
all_job_data.reverse() + return all_job_data + + def job_submit( + self, + backend_name, + qobj_dict, + job_name, + job_tags, + experiment_id, + *_args, + **_kwargs, + ): + """Submit a Qobj to a device.""" + if self._job_limit != -1 and self._unfinished_jobs() >= self._job_limit: + raise RequestsApiError( + "400 Client Error: Bad Request for url: . Reached " + "maximum number of concurrent jobs, Error code: 3458." + ) + + new_job_id = uuid.uuid4().hex + if isinstance(self._job_class, list): + job_class = self._job_class.pop() if self._job_class else self._default_job_class + else: + job_class = self._job_class + job_kwargs = copy.copy(self._job_kwargs) + if self._queue_positions: + job_kwargs["queue_pos"] = self._queue_positions.pop() + if self._est_completion: + job_kwargs["est_completion"] = self._est_completion.pop() + + run_mode = self._run_mode + if run_mode == "dedicated_once": + run_mode = "dedicated" + self._run_mode = "fairshare" + + new_job = job_class( + executor=self._executor, + job_id=new_job_id, + qobj=qobj_dict, + backend_name=backend_name, + job_tags=job_tags, + job_name=job_name, + experiment_id=experiment_id, + run_mode=run_mode, + **job_kwargs, + ) + self._jobs[new_job_id] = new_job + return new_job.data() + + def job_download_qobj(self, job_id, *_args, **_kwargs): + """Retrieve and return a Qobj.""" + return copy.deepcopy(self._get_job(job_id).qobj) + + def job_result(self, job_id, *_args, **_kwargs): + """Return a random job result.""" + if job_id in self._results_retrieved: + warnings.warn(f"Result already retrieved for job {job_id}") + self._results_retrieved.add(job_id) + return self._get_job(job_id).result() + + def job_get(self, job_id, *_args, **_kwargs): + """Return information about a job.""" + return self._get_job(job_id).data() + + def job_status(self, job_id, *_args, **_kwargs): + """Return the status of a job.""" + return self._get_job(job_id).status_data() + + def job_final_status(self, job_id, *_args, **_kwargs): + """Wait until 
the job progress to a final state.""" + job = self._get_job(job_id) + status = job.status() + while status not in API_JOB_FINAL_STATES: + time.sleep(0.5) + status_data = job.status_data() + status = ApiJobStatus(status_data["status"]) + if _kwargs.get("status_queue", None): + data = {"status": status.value} + if status is ApiJobStatus.QUEUED: + data["infoQueue"] = {"status": "PENDING_IN_QUEUE", "position": 1} + _kwargs["status_queue"].put(status_data) + return self.job_status(job_id) + + def job_properties(self, *_args, **_kwargs): + """Return the backend properties of a job.""" + props = FakePoughkeepsie().properties().to_dict() + if self._props_count > 0: + self._props_count -= 1 + new_dt = datetime.now() + timedelta(hours=randrange(300)) + self._props_date = new_dt.isoformat() + props["last_update_date"] = self._props_date + return props + + def job_cancel(self, job_id, *_args, **_kwargs): + """Submit a request for cancelling a job.""" + self._get_job(job_id).cancel() + return {"cancelled": True} + + def backend_job_limit(self, *_args, **_kwargs): + """Return the job limit for the backend.""" + return {"maximumJobs": self._job_limit, "runningJobs": self._unfinished_jobs()} + + def job_update_attribute(self, job_id, attr_name, attr_value, *_args, **_kwargs): + """Update the specified job attribute with the given value.""" + job = self._get_job(job_id) + if attr_name == "name": + job._job_name = attr_value + if attr_name == "tags": + job._job_tags = attr_value.copy() + return {attr_name: attr_value} + + def backend_status(self, backend_name: str) -> Dict[str, Any]: + """Return the status of the backend.""" + return { + "backend_name": backend_name, + "backend_version": "0.0.0", + "operational": True, + "pending_jobs": 0, + "status_msg": "active", + } + + def tear_down(self): + """Clean up job threads.""" + for job_id in list(self._jobs.keys()): + try: + self._jobs[job_id].cancel() + except KeyError: + pass + + def _unfinished_jobs(self): + """Return the number of 
unfinished jobs.""" + return sum(1 for job in self._jobs.values() if job.status() not in API_JOB_FINAL_STATES) + + def _get_job(self, job_id): + """Return job if found.""" + if job_id not in self._jobs: + raise RequestsApiError("Job not found. Error code: 3250.") + return self._jobs[job_id] + + +class JobSubmitFailClient(BaseFakeAccountClient): + """Fake AccountClient used to fail a job submit.""" + + def __init__(self, failed_indexes): + """JobSubmitFailClient constructor.""" + if not isinstance(failed_indexes, list): + failed_indexes = [failed_indexes] + self._failed_indexes = failed_indexes + self._job_count = -1 + super().__init__() + + def job_submit(self, *_args, **_kwargs): # pylint: disable=arguments-differ + """Failing job submit.""" + self._job_count += 1 + if self._job_count in self._failed_indexes: + raise RequestsApiError("Job submit failed!") + return super().job_submit(*_args, **_kwargs) + + +class JobTimeoutClient(BaseFakeAccountClient): + """Fake AccountClient used to fail a job submit.""" + + def __init__(self, *args, max_fail_count=-1, **kwargs): + """JobTimeoutClient constructor.""" + self._fail_count = max_fail_count + super().__init__(*args, **kwargs) + + def job_final_status(self, job_id, *_args, **_kwargs): + """Wait until the job progress to a final state.""" + if self._fail_count != 0: + self._fail_count -= 1 + raise UserTimeoutExceededError("Job timed out!") + return super().job_final_status(job_id, *_args, **_kwargs) diff --git a/test/ibm_test_case.py b/test/ibm_test_case.py index ac91782a6..5c4e642ab 100644 --- a/test/ibm_test_case.py +++ b/test/ibm_test_case.py @@ -16,13 +16,14 @@ import copy import logging import inspect -import unittest import warnings from contextlib import suppress from collections import defaultdict from typing import DefaultDict, Dict from qiskit.test.reference_circuits import ReferenceCircuits +from qiskit.test.base import BaseQiskitTestCase + from qiskit_ibm_runtime import QISKIT_IBM_RUNTIME_LOGGER_NAME from 
qiskit_ibm_runtime import QiskitRuntimeService, Sampler, Options @@ -31,7 +32,7 @@ from .templates import RUNTIME_PROGRAM, RUNTIME_PROGRAM_METADATA, PROGRAM_PREFIX -class IBMTestCase(unittest.TestCase): +class IBMTestCase(BaseQiskitTestCase): """Custom TestCase for use with qiskit-ibm-runtime.""" log: logging.Logger diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index 63a77d496..605ab8bc2 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -12,11 +12,18 @@ """Tests for backend functions using real runtime service.""" -from unittest import SkipTest +from unittest import SkipTest, mock from datetime import datetime, timedelta import copy from qiskit.transpiler.target import Target +from qiskit import QuantumCircuit +from qiskit.providers.exceptions import QiskitBackendNotFoundError +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_provider.ibm_qubit_properties import IBMQubitProperties +from qiskit_ibm_provider.exceptions import IBMBackendValueError + from qiskit_ibm_runtime import QiskitRuntimeService from ..ibm_test_case import IBMIntegrationTestCase @@ -75,7 +82,7 @@ def setUpClass(cls): super().setUpClass() if cls.dependencies.channel == "ibm_cloud": # TODO use real device when cloud supports it - cls.backend = cls.dependencies.service.least_busy(min_num_qubits=5) + cls.backend = cls.dependencies.service.least_busy(simulator=False, min_num_qubits=5) if cls.dependencies.channel == "ibm_quantum": cls.backend = cls.dependencies.service.least_busy( simulator=False, min_num_qubits=5, instance=cls.dependencies.instance @@ -170,13 +177,6 @@ def test_backend_invalid_attribute(self): with self.assertRaises(AttributeError): backend.foobar # pylint: disable=pointless-statement - def test_backend_run(self): - """Check one cannot do backend.run""" - backend = self.backend - with self.subTest(backend=backend.name): - with self.assertRaises(RuntimeError): - backend.run() - def 
test_backend_deepcopy(self): """Test that deepcopy on IBMBackend works correctly""" backend = self.backend @@ -199,3 +199,73 @@ def test_backend_deepcopy(self): backend_copy._api_client._session.base_url, backend._api_client._session.base_url, ) + + def test_backend_pending_jobs(self): + """Test pending jobs are returned.""" + backends = self.service.backends() + self.assertTrue(any(backend.status().pending_jobs > 0 for backend in backends)) + + def test_backend_fetch_all_qubit_properties(self): + """Check retrieving properties of all qubits""" + num_qubits = self.backend.num_qubits + qubits = list(range(num_qubits)) + qubit_properties = self.backend.qubit_properties(qubits) + self.assertEqual(len(qubit_properties), num_qubits) + for i in qubits: + self.assertIsInstance(qubit_properties[i], IBMQubitProperties) + + def test_sim_backend_options(self): + """Test simulator backend options.""" + backend = self.service.backend("ibmq_qasm_simulator") + backend.options.shots = 2048 + backend.set_options(memory=True) + inputs = backend.run(ReferenceCircuits.bell(), shots=1, foo="foo").inputs + self.assertEqual(inputs["shots"], 1) + self.assertTrue(inputs["memory"]) + self.assertEqual(inputs["foo"], "foo") + + @production_only + def test_paused_backend_warning(self): + """Test that a warning is given when running jobs on a paused backend.""" + backend = self.service.backend("ibmq_qasm_simulator") + paused_status = backend.status() + paused_status.status_msg = "internal" + backend.status = mock.MagicMock(return_value=paused_status) + with self.assertWarns(Warning): + backend.run(ReferenceCircuits.bell()) + + def test_backend_wrong_instance(self): + """Test that an error is raised when retrieving a backend not in the instance.""" + if self.dependencies.channel == "ibm_cloud": + raise SkipTest("Cloud channel does not have instance.") + + backends = self.service.backends() + hgps = self.service._hgps.values() + if len(hgps) >= 2: + for hgp in hgps: + backend_names = 
list(hgp._backends) + for backend in backends: + if backend.name not in backend_names: + with self.assertRaises(QiskitBackendNotFoundError): + self.service.backend( + backend.name, + instance=f"{hgp._hub}/{hgp._group}/{hgp._project}", + ) + return + + def test_retrieve_backend_not_exist(self): + """Test that an error is raised when retrieving a backend that does not exist.""" + with self.assertRaises(QiskitBackendNotFoundError): + self.service.backend("nonexistent_backend") + + def test_too_many_qubits_in_circuit(self): + """Check error message if circuit contains more qubits than supported on the backend.""" + num = len(self.backend.properties().qubits) + num_qubits = num + 1 + circuit = QuantumCircuit(num_qubits, num_qubits) + with self.assertRaises(IBMBackendValueError) as err: + _ = self.backend.run(circuit) + self.assertIn( + f"Circuit contains {num_qubits} qubits, but backend has only {num}.", + str(err.exception), + ) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py new file mode 100644 index 000000000..0ab89b85b --- /dev/null +++ b/test/integration/test_ibm_job.py @@ -0,0 +1,436 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""IBMJob Test.""" +import copy +import time +from datetime import datetime, timedelta +from threading import Thread, Event +from unittest import SkipTest, mock +from unittest import skip + +from dateutil import tz +from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister +from qiskit.compiler import transpile +from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_provider.api.rest.job import Job as RestJob +from qiskit_ibm_provider.exceptions import IBMBackendApiError + +from qiskit_ibm_runtime import IBMBackend, RuntimeJob +from qiskit_ibm_runtime.api.exceptions import RequestsApiError +from qiskit_ibm_runtime.exceptions import RuntimeJobTimeoutError, RuntimeJobNotFound +from ..decorators import ( + IntegrationTestDependencies, + integration_test_setup_with_backend, +) +from ..fake_account_client import BaseFakeAccountClient, CancelableFakeJob +from ..ibm_test_case import IBMIntegrationTestCase +from ..utils import ( + most_busy_backend, + cancel_job_safe, + submit_and_cancel, +) + + +class TestIBMJob(IBMIntegrationTestCase): + """Test ibm_job module.""" + + sim_backend: IBMBackend + real_device_backend: IBMBackend + bell = QuantumCircuit + sim_job: RuntimeJob + last_month: datetime + + @classmethod + @integration_test_setup_with_backend(simulator=False, min_num_qubits=2) + def setUpClass(cls, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: + """Initial class level setup.""" + # pylint: disable=arguments-differ + super().setUpClass(dependencies=dependencies) + cls.sim_backend = dependencies.service.backend("ibmq_qasm_simulator") + cls.real_device_backend = backend + cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend) + cls.sim_job = cls.sim_backend.run(cls.bell) + cls.last_month = datetime.now() - timedelta(days=30) + + def test_run_multiple_simulator(self): + """Test running multiple jobs in a simulator.""" + 
num_qubits = 16 + quantum_register = QuantumRegister(num_qubits, "qr") + classical_register = ClassicalRegister(num_qubits, "cr") + quantum_circuit = QuantumCircuit(quantum_register, classical_register) + for i in range(num_qubits - 1): + quantum_circuit.cx(quantum_register[i], quantum_register[i + 1]) + quantum_circuit.measure(quantum_register, classical_register) + num_jobs = 4 + job_array = [ + self.sim_backend.run(transpile([quantum_circuit] * 20), shots=2048) + for _ in range(num_jobs) + ] + timeout = 30 + start_time = time.time() + while True: + check = sum(job.status() is JobStatus.RUNNING for job in job_array) + if check >= 2: + self.log.info("found %d simultaneous jobs", check) + break + if all((job.status() is JobStatus.DONE for job in job_array)): + # done too soon? don't generate error + self.log.warning("all jobs completed before simultaneous jobs could be detected") + break + for job in job_array: + self.log.info( + "%s %s %s %s", + job.status(), + job.status() is JobStatus.RUNNING, + check, + job.job_id(), + ) + self.log.info("- %s", str(time.time() - start_time)) + if time.time() - start_time > timeout and self.sim_backend.status().pending_jobs <= 4: + raise TimeoutError( + "Failed to see multiple running jobs after " "{0} seconds.".format(timeout) + ) + time.sleep(0.2) + + result_array = [job.result() for job in job_array] + self.log.info("got back all job results") + # Ensure all jobs have finished. + self.assertTrue(all((job.status() is JobStatus.DONE for job in job_array))) + self.assertTrue(all((result.success for result in result_array))) + + # Ensure job ids are unique. 
+ job_ids = [job.job_id() for job in job_array] + self.assertEqual(sorted(job_ids), sorted(list(set(job_ids)))) + + def test_cancel(self): + """Test job cancellation.""" + # Find the most busy backend + backend = most_busy_backend(self.service) + submit_and_cancel(backend, self.log) + + def test_retrieve_jobs(self): + """Test retrieving jobs.""" + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=5, + skip=0, + created_after=self.last_month, + ) + self.assertLessEqual(len(job_list), 5) + for job in job_list: + self.assertTrue(isinstance(job.job_id(), str)) + + def test_retrieve_completed_jobs(self): + """Test retrieving jobs with the completed filter.""" + completed_job_list = self.service.jobs( + backend_name=self.sim_backend.name, limit=3, pending=False + ) + for job in completed_job_list: + self.assertTrue(job.status() in [JobStatus.DONE, JobStatus.CANCELLED, JobStatus.ERROR]) + + def test_retrieve_pending_jobs(self): + """Test retrieving jobs with the pending filter.""" + pending_job_list = self.service.jobs( + backend_name=self.sim_backend.name, limit=3, pending=True + ) + for job in pending_job_list: + self.assertTrue(job.status() in [JobStatus.QUEUED, JobStatus.RUNNING]) + + def test_retrieve_job(self): + """Test retrieving a single job.""" + retrieved_job = self.service.job(self.sim_job.job_id()) + self.assertEqual(self.sim_job.job_id(), retrieved_job.job_id()) + self.assertEqual(self.sim_job.inputs["circuits"], retrieved_job.inputs["circuits"]) + self.assertEqual(self.sim_job.result().get_counts(), retrieved_job.result().get_counts()) + + def test_retrieve_job_uses_appropriate_backend(self): + """Test that retrieved jobs come from their appropriate backend.""" + backend_1 = self.real_device_backend + # Get a second backend. 
+ backend_2 = None + service = self.real_device_backend.service + for my_backend in service.backends(): + if my_backend.status().operational and my_backend.name != backend_1.name: + backend_2 = my_backend + break + if not backend_2: + raise SkipTest("Skipping test that requires multiple backends") + + job_1 = backend_1.run(transpile(ReferenceCircuits.bell())) + job_2 = backend_2.run(transpile(ReferenceCircuits.bell())) + + # test a retrieved job's backend is the same as the queried backend + self.assertEqual(service.job(job_1.job_id()).backend().name, backend_1.name) + self.assertEqual(service.job(job_2.job_id()).backend().name, backend_2.name) + + # Cleanup + for job in [job_1, job_2]: + cancel_job_safe(job, self.log) + + def test_retrieve_job_error(self): + """Test retrieving an invalid job.""" + self.assertRaises(RuntimeJobNotFound, self.service.job, "BAD_JOB_ID") + + def test_retrieve_jobs_status(self): + """Test retrieving jobs filtered by status.""" + backend_jobs = self.service.jobs( + backend_name=self.sim_backend.name, + limit=5, + skip=5, + pending=False, + created_after=self.last_month, + ) + self.assertTrue(backend_jobs) + + for job in backend_jobs: + self.assertTrue( + job.status() in JOB_FINAL_STATES, + "Job {} has status {} when it should be DONE, CANCELLED, or ERROR".format( + job.job_id(), job.status() + ), + ) + + def test_retrieve_jobs_created_after(self): + """Test retrieving jobs created after a specified datetime.""" + past_month = datetime.now() - timedelta(days=30) + # Add local tz in order to compare to `creation_date` which is tz aware. 
+ past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal()) + + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=2, + created_after=past_month, + ) + self.assertTrue(job_list) + for job in job_list: + self.assertGreaterEqual( + job.creation_date, + past_month_tz_aware, + "job {} creation date {} not within range".format(job.job_id(), job.creation_date), + ) + + def test_retrieve_jobs_created_before(self): + """Test retrieving jobs created before a specified datetime.""" + past_month = datetime.now() - timedelta(days=30) + # Add local tz in order to compare to `creation_date` which is tz aware. + past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal()) + + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=2, + created_before=past_month, + ) + self.assertTrue(job_list) + for job in job_list: + self.assertLessEqual( + job.creation_date, + past_month_tz_aware, + "job {} creation date {} not within range".format(job.job_id(), job.creation_date), + ) + + def test_retrieve_jobs_between_datetimes(self): + """Test retrieving jobs created between two specified datetimes.""" + date_today = datetime.now() + past_month = date_today - timedelta(30) + past_two_month = date_today - timedelta(60) + + # Add local tz in order to compare to `creation_date` which is tz aware. 
+ past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal()) + past_two_month_tz_aware = past_two_month.replace(tzinfo=tz.tzlocal()) + + with self.subTest(): + job_list = self.service.jobs( + backend_name=self.sim_backend.name, + limit=2, + created_after=past_two_month, + created_before=past_month, + ) + self.assertTrue(job_list) + for job in job_list: + self.assertTrue( + (past_two_month_tz_aware <= job.creation_date <= past_month_tz_aware), + "job {} creation date {} not within range".format( + job.job_id(), job.creation_date + ), + ) + + def test_retrieve_jobs_order(self): + """Test retrieving jobs with different orders.""" + job = self.sim_backend.run(self.bell) + job.wait_for_final_state() + newest_jobs = self.service.jobs( + limit=10, + pending=False, + descending=True, + created_after=self.last_month, + ) + self.assertIn(job.job_id(), [rjob.job_id() for rjob in newest_jobs]) + + oldest_jobs = self.service.jobs( + limit=10, + pending=False, + descending=False, + created_after=self.last_month, + ) + self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs]) + + @skip("how do we support refresh") + def test_refresh_job_result(self): + """Test re-retrieving job result via refresh.""" + result = self.sim_job.result() + + # Save original cached results. + cached_result = copy.deepcopy(result.to_dict()) + self.assertTrue(cached_result) + + # Modify cached results. + result.results[0].header.name = "modified_result" + self.assertNotEqual(cached_result, result.to_dict()) + self.assertEqual(result.results[0].header.name, "modified_result") + + # Re-retrieve result via refresh. 
+ result = self.sim_job.result(refresh=True) + self.assertDictEqual(cached_result, result.to_dict()) + self.assertNotEqual(result.results[0].header.name, "modified_result") + + @skip("TODO update test case") + def test_wait_for_final_state(self): + """Test waiting for job to reach final state.""" + + def final_state_callback(c_job_id, c_status, c_job, **kwargs): + """Job status query callback function.""" + self.assertEqual(c_job_id, job.job_id()) + self.assertNotIn(c_status, JOB_FINAL_STATES) + self.assertEqual(c_job.job_id(), job.job_id()) + self.assertIn("queue_info", kwargs) + + queue_info = kwargs.pop("queue_info", None) + callback_info["called"] = True + + if wait_time is None: + # Look for status change. + data = {"status": c_status, "queue_info": queue_info} + self.assertNotEqual(data, callback_info["last data"]) + callback_info["last data"] = data + else: + # Check called within wait time. + if callback_info["last call time"] and job._status not in JOB_FINAL_STATES: + self.assertAlmostEqual( + time.time() - callback_info["last call time"], + wait_time, + delta=0.2, + ) + callback_info["last call time"] = time.time() + + def job_canceller(job_, exit_event, wait): + exit_event.wait(wait) + cancel_job_safe(job_, self.log) + + wait_args = [2, None] + + saved_api = self.sim_backend._api_client + try: + self.sim_backend._api_client = BaseFakeAccountClient(job_class=CancelableFakeJob) + for wait_time in wait_args: + with self.subTest(wait_time=wait_time): + # Put callback data in a dictionary to make it mutable. + callback_info = { + "called": False, + "last call time": 0.0, + "last data": {}, + } + cancel_event = Event() + job = self.sim_backend.run(self.bell) + # Cancel the job after a while. 
+ Thread(target=job_canceller, args=(job, cancel_event, 7), daemon=True).start() + try: + job.wait_for_final_state( + timeout=10, wait=wait_time, callback=final_state_callback + ) + self.assertTrue(job.in_final_state()) + self.assertTrue(callback_info["called"]) + cancel_event.set() + finally: + # Ensure all threads ended. + for thread in job._executor._threads: + thread.join(0.1) + finally: + self.sim_backend._api_client = saved_api + + def test_wait_for_final_state_timeout(self): + """Test waiting for job to reach final state times out.""" + backend = most_busy_backend(TestIBMJob.service) + job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend)) + try: + self.assertRaises(RuntimeJobTimeoutError, job.wait_for_final_state, timeout=0.1) + finally: + # Ensure all threads ended. + for thread in job._executor._threads: + thread.join(0.1) + cancel_job_safe(job, self.log) + + @skip("not supported by api") + def test_job_submit_partial_fail(self): + """Test job submit partial fail.""" + job_id = [] + + def _side_effect(self, *args, **kwargs): + # pylint: disable=unused-argument + job_id.append(self.job_id) + raise RequestsApiError("Kaboom") + + fail_points = ["put_object_storage", "callback_upload"] + + for fail_method in fail_points: + with self.subTest(fail_method=fail_method): + with mock.patch.object( + RestJob, fail_method, side_effect=_side_effect, autospec=True + ): + with self.assertRaises(IBMBackendApiError): + self.sim_backend.run(self.bell) + + self.assertTrue(job_id, "Job ID not saved.") + job = self.service.job(job_id[0]) + self.assertEqual( + job.status(), + JobStatus.CANCELLED, + f"Job {job.job_id()} status is {job.status()} and not cancelled!", + ) + + def test_job_circuits(self): + """Test job circuits.""" + self.assertEqual(str(self.bell), str(self.sim_job.inputs["circuits"][0])) + + def test_job_options(self): + """Test job options.""" + run_config = {"shots": 2048, "memory": True} + job = self.sim_backend.run(self.bell, **run_config) + 
self.assertLessEqual(run_config.items(), job.inputs.items()) + + def test_job_header(self): + """Test job header.""" + custom_header = {"test": "test_job_header"} + job = self.sim_backend.run(self.bell, header=custom_header) + self.assertEqual(custom_header["test"], job.inputs["header"]["test"]) + self.assertLessEqual(custom_header.items(), job.inputs["header"].items()) + + def test_lazy_loading_params(self): + """Test lazy loading job params.""" + job = self.sim_backend.run(self.bell) + job.wait_for_final_state() + + rjob = self.service.job(job.job_id()) + self.assertFalse(rjob._params) + self.assertTrue(rjob.inputs["circuits"]) diff --git a/test/integration/test_ibm_job_attributes.py b/test/integration/test_ibm_job_attributes.py new file mode 100644 index 000000000..a94b72c50 --- /dev/null +++ b/test/integration/test_ibm_job_attributes.py @@ -0,0 +1,323 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Test IBMJob attributes.""" + +import re +import time +import uuid +from datetime import datetime, timedelta +from unittest import mock, skip + +from dateutil import tz +from qiskit.compiler import transpile +from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES +from qiskit import QuantumCircuit +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_provider.api.clients.runtime import RuntimeClient +from qiskit_ibm_provider.exceptions import ( + IBMBackendValueError, +) +from qiskit_ibm_provider.job.exceptions import IBMJobFailureError + +from qiskit_ibm_runtime import IBMBackend, RuntimeJob +from ..decorators import ( + IntegrationTestDependencies, + integration_test_setup, +) +from ..ibm_test_case import IBMTestCase +from ..utils import ( + most_busy_backend, + cancel_job_safe, + submit_job_bad_shots, + submit_job_one_bad_instr, +) + + +class TestIBMJobAttributes(IBMTestCase): + """Test IBMJob instance attributes.""" + + sim_backend: IBMBackend + bell: QuantumCircuit + sim_job: RuntimeJob + last_week: datetime + + @classmethod + @integration_test_setup() + def setUpClass(cls, dependencies: IntegrationTestDependencies) -> None: + """Initial class level setup.""" + # pylint: disable=arguments-differ + super().setUpClass() + cls.dependencies = dependencies + cls.service = dependencies.service + cls.sim_backend = dependencies.service.backend("ibmq_qasm_simulator") + cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend) + cls.sim_job = cls.sim_backend.run(cls.bell) + cls.last_week = datetime.now() - timedelta(days=7) + + def setUp(self): + """Initial test setup.""" + super().setUp() + self._qc = ReferenceCircuits.bell() + + def test_job_id(self): + """Test getting a job ID.""" + self.assertTrue(self.sim_job.job_id() is not None) + + def test_get_backend_name(self): + """Test getting a backend name.""" + self.assertTrue(self.sim_job.backend().name == self.sim_backend.name) + + @skip("Skip until aer issue 1214 is 
fixed") + def test_error_message_simulator(self): + """Test retrieving job error messages from a simulator backend.""" + job = submit_job_one_bad_instr(self.sim_backend) + with self.assertRaises(IBMJobFailureError) as err_cm: + job.result() + self.assertNotIn("bad_instruction", err_cm.exception.message) + + message = job.error_message() + self.assertIn("Experiment 1: ERROR", message) + + r_message = self.service.job(job.job_id()).error_message() + self.assertIn("Experiment 1: ERROR", r_message) + + @skip("not supported by api") + def test_error_message_validation(self): + """Test retrieving job error message for a validation error.""" + job = submit_job_bad_shots(self.sim_backend) + rjob = self.service.job(job.job_id()) + + for q_job, partial in [(job, False), (rjob, True)]: + with self.subTest(partial=partial): + with self.assertRaises(IBMJobFailureError) as err_cm: + q_job.result(partial=partial) + for msg in (err_cm.exception.message, q_job.error_message()): + self.assertNotIn("Unknown", msg) + self.assertIsNotNone(re.search(r"Error code: [0-9]{4}\.$", msg), msg) + + self.assertEqual(job.error_message(), rjob.error_message()) + + @skip("time_per_step not supported by the api") + def test_refresh(self): + """Test refreshing job data.""" + self.sim_job._wait_for_completion() + if "COMPLETED" not in self.sim_job.time_per_step(): + self.sim_job.refresh() + + rjob = self.service.job(self.sim_job.job_id()) + rjob.refresh() + self.assertEqual(rjob._time_per_step, self.sim_job._time_per_step) + + def test_job_creation_date(self): + """Test retrieving creation date, while ensuring it is in local time.""" + # datetime, before running the job, in local time. + start_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) - timedelta(seconds=1) + job = self.sim_backend.run(self.bell) + job.result() + # datetime, after the job is done running, in local time. 
+ end_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) + timedelta(seconds=1) + + self.assertTrue( + (start_datetime <= job.creation_date <= end_datetime), + "job creation date {} is not " + "between the start date time {} and end date time {}".format( + job.creation_date, start_datetime, end_datetime + ), + ) + + @skip("time_per_step supported in provider but not in runtime") + def test_time_per_step(self): + """Test retrieving time per step, while ensuring the date times are in local time.""" + # datetime, before running the job, in local time. + start_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) - timedelta(seconds=1) + job = self.sim_backend.run(self.bell) + job.result() + # datetime, after the job is done running, in local time. + end_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) + timedelta(seconds=1) + + self.assertTrue(job.time_per_step()) + for step, time_data in job.time_per_step().items(): + self.assertTrue( + (start_datetime <= time_data <= end_datetime), + 'job time step "{}={}" is not ' + "between the start date time {} and end date time {}".format( + step, time_data, start_datetime, end_datetime + ), + ) + + rjob = self.service.job(job.job_id()) + self.assertTrue(rjob.time_per_step()) + + @skip("need attributes not supported") + def test_new_job_attributes(self): + """Test job with new attributes.""" + + def _mocked__api_job_submit(*args, **kwargs): + submit_info = original_submit(*args, **kwargs) + submit_info.update({"batman": "bruce"}) + return submit_info + + original_submit = self.sim_backend._api_client.job_submit + with mock.patch.object(RuntimeClient, "job_submit", side_effect=_mocked__api_job_submit): + job = self.sim_backend.run(self.bell) + + self.assertEqual(job.batman_, "bruce") + + @skip("queue_info supported in provider but not here") + def test_queue_info(self): + """Test retrieving queue information.""" + # Find the most busy backend. 
+ backend = most_busy_backend(self.service) + leave_states = list(JOB_FINAL_STATES) + [JobStatus.RUNNING] + job = backend.run(self.bell) + queue_info = None + for _ in range(20): + queue_info = job.queue_info() + # Even if job status is queued, its queue info may not be immediately available. + if ( + job._status is JobStatus.QUEUED and job.queue_position() is not None + ) or job._status in leave_states: + break + time.sleep(1) + + if job._status is JobStatus.QUEUED and job.queue_position() is not None: + self.log.debug( + "Job id=%s, queue info=%s, queue position=%s", + job.job_id(), + queue_info, + job.queue_position(), + ) + msg = "Job {} is queued but has no ".format(job.job_id()) + self.assertIsNotNone(queue_info, msg + "queue info.") + for attr, value in queue_info.__dict__.items(): + self.assertIsNotNone(value, msg + attr) + self.assertTrue( + all( + 0 < priority <= 1.0 + for priority in [ + queue_info.hub_priority, + queue_info.group_priority, + queue_info.project_priority, + ] + ), + "Unexpected queue info {} for job {}".format(queue_info, job.job_id()), + ) + + self.assertTrue(queue_info.format()) + self.assertTrue(repr(queue_info)) + elif job._status is not None: + self.assertIsNone(job.queue_position()) + self.log.warning("Unable to retrieve queue information") + + # Cancel job so it doesn't consume more resources. + cancel_job_safe(job, self.log) + + def test_esp_readout_not_enabled(self): + """Test that an error is thrown is ESP readout is used and the backend does not support it.""" + # sim backend does not have ``measure_esp_enabled`` flag: defaults to ``False`` + with self.assertRaises(IBMBackendValueError) as context_manager: + self.sim_backend.run(self.bell, use_measure_esp=True) + self.assertIn( + "ESP readout not supported on this device. 
Please make sure the flag " + "'use_measure_esp' is unset or set to 'False'.", + context_manager.exception.message, + ) + + def test_esp_readout_enabled(self): + """Test that ESP readout can be used when the backend supports it.""" + try: + setattr(self.sim_backend._configuration, "measure_esp_enabled", True) + job = self.sim_backend.run(self.bell, use_measure_esp=True) + self.assertEqual(job.inputs["use_measure_esp"], True) + finally: + delattr(self.sim_backend._configuration, "measure_esp_enabled") + + def test_esp_readout_default_value(self): + """Test that ESP readout is set to backend support value if not specified.""" + try: + # ESP readout not enabled on backend + setattr(self.sim_backend._configuration, "measure_esp_enabled", False) + job = self.sim_backend.run(self.bell) + self.assertIsNone(getattr(job.inputs, "use_measure_esp", None)) + # ESP readout enabled on backend + setattr(self.sim_backend._configuration, "measure_esp_enabled", True) + job = self.sim_backend.run(self.bell, use_measure_esp=True) + self.assertEqual(job.inputs["use_measure_esp"], True) + finally: + delattr(self.sim_backend._configuration, "measure_esp_enabled") + + def test_job_tags(self): + """Test using job tags.""" + # Use a unique tag. 
+ job_tags = [ + uuid.uuid4().hex[0:16], + uuid.uuid4().hex[0:16], + uuid.uuid4().hex[0:16], + ] + job = self.sim_backend.run(self.bell, job_tags=job_tags) + + no_rjobs_tags = [job_tags[0:1] + ["phantom_tags"], ["phantom_tag"]] + for tags in no_rjobs_tags: + rjobs = self.service.jobs(job_tags=tags, created_after=self.last_week) + self.assertEqual(len(rjobs), 0, "Expected job {}, got {}".format(job.job_id(), rjobs)) + + has_rjobs_tags = [job_tags, job_tags[1:3]] + for tags in has_rjobs_tags: + with self.subTest(tags=tags): + rjobs = self.service.jobs( + job_tags=tags, + created_after=self.last_week, + ) + self.assertEqual( + len(rjobs), 1, "Expected job {}, got {}".format(job.job_id(), rjobs) + ) + self.assertEqual(rjobs[0].job_id(), job.job_id()) + # TODO check why this sometimes fails + # self.assertEqual(set(rjobs[0].tags()), set(job_tags)) + + @skip("refresh supported in provider but not in runtime") + def test_job_tags_replace(self): + """Test updating job tags by replacing a job's existing tags.""" + initial_job_tags = [uuid.uuid4().hex[:16]] + job = self.sim_backend.run(self.bell, job_tags=initial_job_tags) + + tags_to_replace_subtests = [ + [], # empty tags. + list("{}_new_tag_{}".format(uuid.uuid4().hex[:5], i) for i in range(2)), # unique tags. + initial_job_tags + ["foo"], + ] + for tags_to_replace in tags_to_replace_subtests: + with self.subTest(tags_to_replace=tags_to_replace): + # Update the job tags. + _ = job.update_tags(new_tags=tags_to_replace) + + # Wait a bit so we don't get cached results. 
+ time.sleep(2) + job.refresh() + + self.assertEqual(set(tags_to_replace), set(job.tags())) + + def test_invalid_job_tags(self): + """Test using job tags with an and operator.""" + self.assertRaises(IBMBackendValueError, self.sim_backend.run, self.bell, job_tags={"foo"}) + self.assertRaises( + IBMBackendValueError, + self.service.jobs, + job_tags=[1, 2, 3], + ) + + def test_cost_estimation(self): + """Test cost estimation is returned correctly.""" + self.assertTrue(self.sim_job.usage_estimation) + self.assertIn("quantum_seconds", self.sim_job.usage_estimation) diff --git a/test/integration/test_ibm_qasm_simulator.py b/test/integration/test_ibm_qasm_simulator.py new file mode 100644 index 000000000..1cb074c84 --- /dev/null +++ b/test/integration/test_ibm_qasm_simulator.py @@ -0,0 +1,171 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Test IBM Quantum online QASM simulator.""" + +from unittest import mock +from unittest import skip + +from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister +from qiskit.compiler import transpile +from qiskit.providers.aer.noise import ( # pylint: disable=import-error,no-name-in-module + NoiseModel, +) +from qiskit.test.reference_circuits import ReferenceCircuits + +from qiskit_ibm_runtime import IBMBackend +from ..decorators import ( + integration_test_setup_with_backend, + IntegrationTestDependencies, +) +from ..ibm_test_case import IBMIntegrationTestCase + + +class TestIBMQasmSimulator(IBMIntegrationTestCase): + """Test IBM Quantum QASM Simulator.""" + + @integration_test_setup_with_backend(simulator=False) + def setUp(self, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: + """Initial test setup.""" + # pylint: disable=unused-argument + # pylint: disable=arguments-differ + super().setUp() + self.sim_backend = self.service.backend("ibmq_qasm_simulator") + self.real_device_backend = backend + + def test_execute_one_circuit_simulator_online(self): + """Test execute_one_circuit_simulator_online.""" + quantum_register = QuantumRegister(1) + classical_register = ClassicalRegister(1) + quantum_circuit = QuantumCircuit(quantum_register, classical_register, name="qc") + quantum_circuit.h(quantum_register[0]) + quantum_circuit.measure(quantum_register[0], classical_register[0]) + circs = transpile(quantum_circuit, backend=self.sim_backend) + shots = 1024 + job = self.sim_backend.run(circs, shots=shots) + result = job.result() + counts = result.get_counts(quantum_circuit) + target = {"0": shots / 2, "1": shots / 2} + threshold = 0.1 * shots + self.assertDictAlmostEqual(counts, target, threshold) + + def test_execute_several_circuits_simulator_online(self): + """Test execute_several_circuits_simulator_online.""" + quantum_register = QuantumRegister(2) + classical_register = ClassicalRegister(2) + qcr1 = 
QuantumCircuit(quantum_register, classical_register, name="qc1") + qcr2 = QuantumCircuit(quantum_register, classical_register, name="qc2") + qcr1.h(quantum_register) + qcr2.h(quantum_register[0]) + qcr2.cx(quantum_register[0], quantum_register[1]) + qcr1.measure(quantum_register[0], classical_register[0]) + qcr1.measure(quantum_register[1], classical_register[1]) + qcr2.measure(quantum_register[0], classical_register[0]) + qcr2.measure(quantum_register[1], classical_register[1]) + shots = 1024 + circs = transpile([qcr1, qcr2], backend=self.sim_backend) + job = self.sim_backend.run(circs, shots=shots) + result = job.result() + counts1 = result.get_counts(qcr1) + counts2 = result.get_counts(qcr2) + target1 = {"00": shots / 4, "01": shots / 4, "10": shots / 4, "11": shots / 4} + target2 = {"00": shots / 2, "11": shots / 2} + threshold = 0.1 * shots + self.assertDictAlmostEqual(counts1, target1, threshold) + self.assertDictAlmostEqual(counts2, target2, threshold) + + def test_online_qasm_simulator_two_registers(self): + """Test online_qasm_simulator_two_registers.""" + qr1 = QuantumRegister(2) + cr1 = ClassicalRegister(2) + qr2 = QuantumRegister(2) + cr2 = ClassicalRegister(2) + qcr1 = QuantumCircuit(qr1, qr2, cr1, cr2, name="circuit1") + qcr2 = QuantumCircuit(qr1, qr2, cr1, cr2, name="circuit2") + qcr1.x(qr1[0]) + qcr2.x(qr2[1]) + qcr1.measure(qr1[0], cr1[0]) + qcr1.measure(qr1[1], cr1[1]) + qcr1.measure(qr2[0], cr2[0]) + qcr1.measure(qr2[1], cr2[1]) + qcr2.measure(qr1[0], cr1[0]) + qcr2.measure(qr1[1], cr1[1]) + qcr2.measure(qr2[0], cr2[0]) + qcr2.measure(qr2[1], cr2[1]) + circs = transpile([qcr1, qcr2], self.sim_backend) + job = self.sim_backend.run(circs, shots=1024) + result = job.result() + result1 = result.get_counts(qcr1) + result2 = result.get_counts(qcr2) + self.assertEqual(result1, {"00 01": 1024}) + self.assertEqual(result2, {"10 00": 1024}) + + @skip("TODO refactor to use backend._runtime_run") + def test_new_sim_method(self): + """Test new simulator 
methods.""" + + def _new_submit(qobj, *args, **kwargs): + # pylint: disable=unused-argument + self.assertEqual( + qobj.config.method, "extended_stabilizer", f"qobj header={qobj.header}" + ) + return mock.MagicMock() + + backend = self.sim_backend + + sim_method = backend._configuration._data.get("simulation_method", None) + submit_fn = backend._submit_job + + try: + backend._configuration._data["simulation_method"] = "extended_stabilizer" + backend._submit_job = _new_submit + circ = transpile(ReferenceCircuits.bell(), backend=backend) + backend.run(circ, header={"test": "circuits"}) + finally: + backend._configuration._data["simulation_method"] = sim_method + backend._submit_job = submit_fn + + @skip("TODO refactor to use backend._runtime_run") + def test_new_sim_method_no_overwrite(self): + """Test custom method option is not overwritten.""" + + def _new_submit(qobj, *args, **kwargs): + # pylint: disable=unused-argument + self.assertEqual(qobj.config.method, "my_method", f"qobj header={qobj.header}") + return mock.MagicMock() + + backend = self.sim_backend + + sim_method = backend._configuration._data.get("simulation_method", None) + submit_fn = backend._submit_job + + try: + backend._configuration._data["simulation_method"] = "extended_stabilizer" + backend._submit_job = _new_submit + circ = transpile(ReferenceCircuits.bell(), backend=backend) + backend.run(circ, method="my_method", header={"test": "circuits"}) + finally: + backend._configuration._data["simulation_method"] = sim_method + backend._submit_job = submit_fn + + # @skip( + # "NoiseModel.from_backend does not currently support V2 Backends. \ + # Skip test until it's fixed in aer." 
+ # ) + def test_simulator_with_noise_model(self): + """Test using simulator with a noise model.""" + noise_model = NoiseModel.from_backend(self.real_device_backend) + result = self.sim_backend.run( + transpile(ReferenceCircuits.bell(), backend=self.sim_backend), + noise_model=noise_model, + ).result() + self.assertTrue(result) diff --git a/test/integration/test_session.py b/test/integration/test_session.py index 6b6bdb285..6b7d77c5d 100644 --- a/test/integration/test_session.py +++ b/test/integration/test_session.py @@ -16,6 +16,7 @@ from qiskit.quantum_info import SparsePauliOp from qiskit.test.reference_circuits import ReferenceCircuits from qiskit.primitives import EstimatorResult, SamplerResult +from qiskit.result import Result from qiskit_ibm_runtime import Estimator, Session, Sampler, Options @@ -97,3 +98,77 @@ def test_session_from_id(self, service): sampler = Sampler(session=new_session) job = sampler.run(ReferenceCircuits.bell(), shots=400) self.assertEqual(session_id, job.session_id) + + +class TestBackendRunInSession(IBMIntegrationTestCase): + """Integration tests for Backend.run in Session.""" + + def test_session_id(self): + """Test that session_id is updated correctly and maintained throughout the session""" + backend = self.service.get_backend("ibmq_qasm_simulator") + backend.open_session() + self.assertEqual(backend.session.session_id, None) + self.assertTrue(backend.session.active) + job1 = backend.run(ReferenceCircuits.bell()) + self.assertEqual(job1._session_id, job1.job_id()) + job2 = backend.run(ReferenceCircuits.bell()) + self.assertFalse(job2._session_id == job2.job_id()) + + def test_backend_run_with_session(self): + """Test that 'shots' parameter is transferred correctly""" + shots = 1000 + backend = self.service.backend("ibmq_qasm_simulator") + backend.open_session() + result = backend.run(circuits=ReferenceCircuits.bell(), shots=shots).result() + backend.cancel_session() + self.assertIsInstance(result, Result) + 
self.assertEqual(result.results[0].shots, shots) + self.assertAlmostEqual( + result.get_counts()["00"], result.get_counts()["11"], delta=shots / 10 + ) + + def test_session_cancel(self): + """Test closing a session""" + backend = self.service.backend("ibmq_qasm_simulator") + backend.open_session() + self.assertTrue(backend.session.active) + backend.cancel_session() + self.assertIsNone(backend.session) + + def test_run_after_cancel(self): + """Test running after session is cancelled.""" + backend = self.service.backend("ibmq_qasm_simulator") + job1 = backend.run(circuits=ReferenceCircuits.bell()) + self.assertIsNone(backend.session) + self.assertIsNone(job1._session_id) + + backend.open_session() + job2 = backend.run(ReferenceCircuits.bell()) + self.assertIsNotNone(job2._session_id) + backend.cancel_session() + + job3 = backend.run(circuits=ReferenceCircuits.bell()) + self.assertIsNone(backend.session) + self.assertIsNone(job3._session_id) + + def test_session_as_context_manager(self): + """Test session as a context manager""" + backend = self.service.backend("ibmq_qasm_simulator") + + with backend.open_session() as session: + job1 = backend.run(ReferenceCircuits.bell()) + session_id = session.session_id + self.assertEqual(session_id, job1.job_id()) + job2 = backend.run(ReferenceCircuits.bell()) + self.assertFalse(session_id == job2.job_id()) + + def test_run_after_cancel_as_context_manager(self): + """Test run after cancel in context manager""" + backend = self.service.backend("ibmq_qasm_simulator") + with backend.open_session() as session: + _ = backend.run(ReferenceCircuits.bell()) + self.assertEqual(backend.session, session) + backend.cancel_session() + job = backend.run(circuits=ReferenceCircuits.bell()) + self.assertIsNone(backend.session) + self.assertIsNone(job._session_id) diff --git a/test/unit/test_backend.py b/test/unit/test_backend.py new file mode 100644 index 000000000..d9f081823 --- /dev/null +++ b/test/unit/test_backend.py @@ -0,0 +1,308 @@ +# This 
code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Tests for the backend functions.""" +import copy +from unittest import mock +import warnings + +from qiskit import transpile, qasm3, QuantumCircuit +from qiskit.providers.fake_provider import FakeManila +from qiskit.providers.models import BackendStatus + +from qiskit_ibm_provider.exceptions import IBMBackendValueError + +from qiskit_ibm_runtime.ibm_backend import IBMBackend + +from ..ibm_test_case import IBMTestCase +from ..utils import ( + create_faulty_backend, +) + + +class TestBackend(IBMTestCase): + """Tests for IBMBackend class.""" + + def test_raise_faulty_qubits(self): + """Test faulty qubits is raised.""" + fake_backend = FakeManila() + num_qubits = fake_backend.configuration().num_qubits + circ = QuantumCircuit(num_qubits, num_qubits) + for i in range(num_qubits): + circ.x(i) + + transpiled = transpile(circ, backend=fake_backend) + faulty_qubit = 4 + ibm_backend = create_faulty_backend(fake_backend, faulty_qubit=faulty_qubit) + + with self.assertRaises(ValueError) as err: + ibm_backend.run(transpiled) + + self.assertIn(f"faulty qubit {faulty_qubit}", str(err.exception)) + + def test_raise_faulty_qubits_many(self): + """Test faulty qubits is raised if one circuit uses it.""" + fake_backend = FakeManila() + num_qubits = fake_backend.configuration().num_qubits + + circ1 = QuantumCircuit(1, 1) + circ1.x(0) + circ2 = QuantumCircuit(num_qubits, num_qubits) + for i in range(num_qubits): + circ2.x(i) + + transpiled = transpile([circ1, circ2], backend=fake_backend) + faulty_qubit = 4 + 
ibm_backend = create_faulty_backend(fake_backend, faulty_qubit=faulty_qubit) + + with self.assertRaises(ValueError) as err: + ibm_backend.run(transpiled) + + self.assertIn(f"faulty qubit {faulty_qubit}", str(err.exception)) + + def test_raise_faulty_edge(self): + """Test faulty edge is raised.""" + fake_backend = FakeManila() + num_qubits = fake_backend.configuration().num_qubits + circ = QuantumCircuit(num_qubits, num_qubits) + for i in range(num_qubits - 2): + circ.cx(i, i + 1) + + transpiled = transpile(circ, backend=fake_backend) + edge_qubits = [0, 1] + ibm_backend = create_faulty_backend(fake_backend, faulty_edge=("cx", edge_qubits)) + + with self.assertRaises(ValueError) as err: + ibm_backend.run(transpiled) + + self.assertIn("cx", str(err.exception)) + self.assertIn(f"faulty edge {tuple(edge_qubits)}", str(err.exception)) + + @staticmethod + def test_faulty_qubit_not_used(): + """Test faulty qubit is not raise if not used.""" + fake_backend = FakeManila() + circ = QuantumCircuit(2, 2) + for i in range(2): + circ.x(i) + + transpiled = transpile(circ, backend=fake_backend, initial_layout=[0, 1]) + faulty_qubit = 4 + ibm_backend = create_faulty_backend(fake_backend, faulty_qubit=faulty_qubit) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + ibm_backend.run(circuits=transpiled) + + mock_run.assert_called_once() + + @staticmethod + def test_faulty_edge_not_used(): + """Test faulty edge is not raised if not used.""" + + fake_backend = FakeManila() + coupling_map = fake_backend.configuration().coupling_map + + circ = QuantumCircuit(2, 2) + circ.cx(0, 1) + + transpiled = transpile(circ, backend=fake_backend, initial_layout=coupling_map[0]) + edge_qubits = coupling_map[-1] + ibm_backend = create_faulty_backend(fake_backend, faulty_edge=("cx", edge_qubits)) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + ibm_backend.run(circuits=transpiled) + + mock_run.assert_called_once() + + def test_dynamic_circuits_warning(self): + 
"""Test warning when user defines dynamic==False and circuits are dynamic""" + # pylint: disable=not-context-manager + + # backend is not faulty because no faulty parameters given + backend = create_faulty_backend(model_backend=FakeManila()) + + circuits = [] + circ = QuantumCircuit(2, 2) + circ.h(0) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + circuits.append(circ) + + circ = QuantumCircuit(3, 2) + with circ.for_loop(range(4)): + circ.h(0) + circuits.append(circ) + + circ = QuantumCircuit(2, 2) + circ.h(0) + circ.measure([0], [0]) + with circ.switch(target=0) as case: + with case(0): + circ.x(0) + with case(case.DEFAULT): + circ.cx(0, 1) + circuits.append(circ) + + for circuit in circuits: + # using warnings to catch multiple warnings + with warnings.catch_warnings(record=True) as warn: + with mock.patch.object(IBMBackend, "_runtime_run"): + backend.run(circuits=circuit, dynamic=False) + self.assertIn( + "Parameter 'dynamic' is False, but the circuit contains dynamic constructs.", + str(warn[0].message), + ) + self.assertIn( + f"The backend {backend.name} does not support dynamic circuits.", + str(warn[1].message), + ) + + @staticmethod + def _create_dc_test_backend(): + """Create a test backend with an IfElseOp enables.""" + model_backend = FakeManila() + properties = model_backend.properties() + + out_backend = IBMBackend( + configuration=model_backend.configuration(), + service=mock.MagicMock(), + api_client=None, + instance=None, + ) + + out_backend.status = lambda: BackendStatus( + backend_name="foo", + backend_version="1.0", + operational=True, + pending_jobs=0, + status_msg="", + ) + out_backend.properties = lambda: properties + + return out_backend + + def test_single_dynamic_circuit_submission(self): + """Test submitting single circuit with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + 
circ.x(1) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circ, dynamic=True) + + mock_run.assert_called_once() + + def test_multi_dynamic_circuit_submission(self): + """Test submitting multiple circuits with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + circuits = [circ, circ] + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circuits, dynamic=True) + + mock_run.assert_called_once() + + def test_single_openqasm3_submission(self): + """Test submitting a single openqasm3 strings with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + qasm3_circ = qasm3.dumps(circ, disable_constants=True) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=qasm3_circ, dynamic=True) + + mock_run.assert_called_once() + + def test_runtime_image_selection_submission(self): + """Test image selection from runtime""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circ, dynamic=True) + + mock_run.assert_called_once() + + def test_multi_openqasm3_submission(self): + """Test submitting multiple openqasm3 strings with dynamic=True""" + # pylint: disable=not-context-manager + + backend = self._create_dc_test_backend() + + circ = QuantumCircuit(2, 2) + circ.measure(0, 0) + with circ.if_test((0, False)): + circ.x(1) + + image = "test-image" + + with mock.patch.object(IBMBackend, "_runtime_run") as mock_run: + backend.run(circuits=circ, dynamic=True, 
image=image) + + mock_run.assert_called_once() + self.assertEqual(mock_run.call_args.kwargs["image"], image) + + def test_deepcopy(self): + """Test that deepcopy of a backend works properly""" + backend = self._create_dc_test_backend() + backend_copy = copy.deepcopy(backend) + self.assertEqual(backend_copy.name, backend.name) + + def test_too_many_circuits(self): + """Test exception when number of circuits exceeds backend._max_circuits""" + model_backend = FakeManila() + backend = IBMBackend( + configuration=model_backend.configuration(), + service=mock.MagicMock(), + api_client=None, + instance=None, + ) + max_circs = backend.configuration().max_experiments + + circs = [] + for _ in range(max_circs + 1): + circ = QuantumCircuit(1) + circ.x(0) + circs.append(circ) + with self.assertRaises(IBMBackendValueError) as err: + backend.run(circs) + self.assertIn( + f"Number of circuits, {max_circs+1} exceeds the maximum for this backend, {max_circs}", + str(err.exception), + ) diff --git a/test/utils.py b/test/utils.py index 284ec0220..438197825 100644 --- a/test/utils.py +++ b/test/utils.py @@ -21,6 +21,9 @@ from datetime import datetime from qiskit.circuit import QuantumCircuit +from qiskit.compiler import transpile, assemble +from qiskit.qobj import QasmQobj +from qiskit.test.reference_circuits import ReferenceCircuits from qiskit.providers.jobstatus import JOB_FINAL_STATES, JobStatus from qiskit.providers.exceptions import QiskitBackendNotFoundError from qiskit.providers.models import BackendStatus, BackendProperties @@ -57,6 +60,30 @@ def setup_test_logging(logger: logging.Logger, filename: str) -> None: logger.setLevel(os.getenv("LOG_LEVEL", "DEBUG")) +def most_busy_backend( + service: QiskitRuntimeService, + instance: Optional[str] = None, +) -> IBMBackend: + """Return the most busy backend for the provider given. + + Return the most busy available backend for those that + have a `pending_jobs` in their `status`. 
Backends such as + local backends that do not have this are not considered. + + Args: + service: Qiskit Runtime Service. + instance: The instance in the hub/group/project format. + + Returns: + The most busy backend. + """ + backends = service.backends(simulator=False, operational=True, instance=instance) + return max( + (b for b in backends if b.configuration().n_qubits >= 5), + key=lambda b: b.status().pending_jobs, + ) + + def get_large_circuit(backend: IBMBackend) -> QuantumCircuit: """Return a slightly larger circuit that would run a bit longer. @@ -254,3 +281,71 @@ def get_mocked_backend(name: str = "ibm_gotham") -> Any: mock_backend.name = name mock_backend._instance = None return mock_backend + + +def submit_and_cancel(backend: IBMBackend, logger: logging.Logger) -> RuntimeJob: + """Submit and cancel a job. + + Args: + backend: Backend to submit the job to. + + Returns: + Cancelled job. + """ + circuit = transpile(ReferenceCircuits.bell(), backend=backend) + job = backend.run(circuit) + cancel_job_safe(job, logger=logger) + return job + + +def submit_job_bad_shots(backend: IBMBackend) -> RuntimeJob: + """Submit a job that will fail due to too many shots. + + Args: + backend: Backend to submit the job to. + + Returns: + Submitted job. + """ + qobj = bell_in_qobj(backend=backend) + # Modify the number of shots to be an invalid amount. + qobj.config.shots = backend.configuration().max_shots + 10000 + job_to_fail = backend._submit_job(qobj) + return job_to_fail + + +def submit_job_one_bad_instr(backend: IBMBackend) -> RuntimeJob: + """Submit a job that contains one good and one bad instruction. + + Args: + backend: Backend to submit the job to. + + Returns: + Submitted job. + """ + qc_new = transpile(ReferenceCircuits.bell(), backend) + if backend.configuration().simulator: + # Specify method so it doesn't fail at method selection. 
+ qobj = assemble([qc_new] * 2, backend=backend, method="statevector") + else: + qobj = assemble([qc_new] * 2, backend=backend) + qobj.experiments[1].instructions[1].name = "bad_instruction" + job = backend._submit_job(qobj) + return job + + +def bell_in_qobj(backend: IBMBackend, shots: int = 1024) -> QasmQobj: + """Return a bell circuit in Qobj format. + + Args: + backend: Backend to use for transpiling the circuit. + shots: Number of shots. + + Returns: + A bell circuit in Qobj format. + """ + return assemble( + transpile(ReferenceCircuits.bell(), backend=backend), + backend=backend, + shots=shots, + ) From e31033d917bf167e47893796b695a31d48f6be31 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Wed, 8 Nov 2023 14:18:09 -0500 Subject: [PATCH 14/22] Fix job_tags test & remove irrelevant skipped tests (#1204) * fix job_tags test, removed skipped tests * add 2 tests back --- test/integration/test_ibm_job_attributes.py | 236 ++++++-------------- 1 file changed, 72 insertions(+), 164 deletions(-) diff --git a/test/integration/test_ibm_job_attributes.py b/test/integration/test_ibm_job_attributes.py index a94b72c50..c04768c18 100644 --- a/test/integration/test_ibm_job_attributes.py +++ b/test/integration/test_ibm_job_attributes.py @@ -12,23 +12,20 @@ """Test IBMJob attributes.""" -import re -import time import uuid +import time from datetime import datetime, timedelta -from unittest import mock, skip +from unittest import skip from dateutil import tz from qiskit.compiler import transpile -from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES from qiskit import QuantumCircuit +from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES from qiskit.test.reference_circuits import ReferenceCircuits -from qiskit_ibm_provider.api.clients.runtime import RuntimeClient from qiskit_ibm_provider.exceptions import ( IBMBackendValueError, ) -from qiskit_ibm_provider.job.exceptions import IBMJobFailureError from qiskit_ibm_runtime import IBMBackend, RuntimeJob from 
..decorators import ( @@ -36,12 +33,7 @@ integration_test_setup, ) from ..ibm_test_case import IBMTestCase -from ..utils import ( - most_busy_backend, - cancel_job_safe, - submit_job_bad_shots, - submit_job_one_bad_instr, -) +from ..utils import most_busy_backend class TestIBMJobAttributes(IBMTestCase): @@ -78,47 +70,6 @@ def test_get_backend_name(self): """Test getting a backend name.""" self.assertTrue(self.sim_job.backend().name == self.sim_backend.name) - @skip("Skip until aer issue 1214 is fixed") - def test_error_message_simulator(self): - """Test retrieving job error messages from a simulator backend.""" - job = submit_job_one_bad_instr(self.sim_backend) - with self.assertRaises(IBMJobFailureError) as err_cm: - job.result() - self.assertNotIn("bad_instruction", err_cm.exception.message) - - message = job.error_message() - self.assertIn("Experiment 1: ERROR", message) - - r_message = self.service.job(job.job_id()).error_message() - self.assertIn("Experiment 1: ERROR", r_message) - - @skip("not supported by api") - def test_error_message_validation(self): - """Test retrieving job error message for a validation error.""" - job = submit_job_bad_shots(self.sim_backend) - rjob = self.service.job(job.job_id()) - - for q_job, partial in [(job, False), (rjob, True)]: - with self.subTest(partial=partial): - with self.assertRaises(IBMJobFailureError) as err_cm: - q_job.result(partial=partial) - for msg in (err_cm.exception.message, q_job.error_message()): - self.assertNotIn("Unknown", msg) - self.assertIsNotNone(re.search(r"Error code: [0-9]{4}\.$", msg), msg) - - self.assertEqual(job.error_message(), rjob.error_message()) - - @skip("time_per_step not supported by the api") - def test_refresh(self): - """Test refreshing job data.""" - self.sim_job._wait_for_completion() - if "COMPLETED" not in self.sim_job.time_per_step(): - self.sim_job.refresh() - - rjob = self.service.job(self.sim_job.job_id()) - rjob.refresh() - self.assertEqual(rjob._time_per_step, 
self.sim_job._time_per_step) - def test_job_creation_date(self): """Test retrieving creation date, while ensuring it is in local time.""" # datetime, before running the job, in local time. @@ -136,93 +87,6 @@ def test_job_creation_date(self): ), ) - @skip("time_per_step supported in provider but not in runtime") - def test_time_per_step(self): - """Test retrieving time per step, while ensuring the date times are in local time.""" - # datetime, before running the job, in local time. - start_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) - timedelta(seconds=1) - job = self.sim_backend.run(self.bell) - job.result() - # datetime, after the job is done running, in local time. - end_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) + timedelta(seconds=1) - - self.assertTrue(job.time_per_step()) - for step, time_data in job.time_per_step().items(): - self.assertTrue( - (start_datetime <= time_data <= end_datetime), - 'job time step "{}={}" is not ' - "between the start date time {} and end date time {}".format( - step, time_data, start_datetime, end_datetime - ), - ) - - rjob = self.service.job(job.job_id()) - self.assertTrue(rjob.time_per_step()) - - @skip("need attributes not supported") - def test_new_job_attributes(self): - """Test job with new attributes.""" - - def _mocked__api_job_submit(*args, **kwargs): - submit_info = original_submit(*args, **kwargs) - submit_info.update({"batman": "bruce"}) - return submit_info - - original_submit = self.sim_backend._api_client.job_submit - with mock.patch.object(RuntimeClient, "job_submit", side_effect=_mocked__api_job_submit): - job = self.sim_backend.run(self.bell) - - self.assertEqual(job.batman_, "bruce") - - @skip("queue_info supported in provider but not here") - def test_queue_info(self): - """Test retrieving queue information.""" - # Find the most busy backend. 
- backend = most_busy_backend(self.service) - leave_states = list(JOB_FINAL_STATES) + [JobStatus.RUNNING] - job = backend.run(self.bell) - queue_info = None - for _ in range(20): - queue_info = job.queue_info() - # Even if job status is queued, its queue info may not be immediately available. - if ( - job._status is JobStatus.QUEUED and job.queue_position() is not None - ) or job._status in leave_states: - break - time.sleep(1) - - if job._status is JobStatus.QUEUED and job.queue_position() is not None: - self.log.debug( - "Job id=%s, queue info=%s, queue position=%s", - job.job_id(), - queue_info, - job.queue_position(), - ) - msg = "Job {} is queued but has no ".format(job.job_id()) - self.assertIsNotNone(queue_info, msg + "queue info.") - for attr, value in queue_info.__dict__.items(): - self.assertIsNotNone(value, msg + attr) - self.assertTrue( - all( - 0 < priority <= 1.0 - for priority in [ - queue_info.hub_priority, - queue_info.group_priority, - queue_info.project_priority, - ] - ), - "Unexpected queue info {} for job {}".format(queue_info, job.job_id()), - ) - - self.assertTrue(queue_info.format()) - self.assertTrue(repr(queue_info)) - elif job._status is not None: - self.assertIsNone(job.queue_position()) - self.log.warning("Unable to retrieve queue information") - - # Cancel job so it doesn't consume more resources. 
- cancel_job_safe(job, self.log) - def test_esp_readout_not_enabled(self): """Test that an error is thrown is ESP readout is used and the backend does not support it.""" # sim backend does not have ``measure_esp_enabled`` flag: defaults to ``False`` @@ -286,33 +150,11 @@ def test_job_tags(self): # TODO check why this sometimes fails # self.assertEqual(set(rjobs[0].tags()), set(job_tags)) - @skip("refresh supported in provider but not in runtime") - def test_job_tags_replace(self): - """Test updating job tags by replacing a job's existing tags.""" - initial_job_tags = [uuid.uuid4().hex[:16]] - job = self.sim_backend.run(self.bell, job_tags=initial_job_tags) - - tags_to_replace_subtests = [ - [], # empty tags. - list("{}_new_tag_{}".format(uuid.uuid4().hex[:5], i) for i in range(2)), # unique tags. - initial_job_tags + ["foo"], - ] - for tags_to_replace in tags_to_replace_subtests: - with self.subTest(tags_to_replace=tags_to_replace): - # Update the job tags. - _ = job.update_tags(new_tags=tags_to_replace) - - # Wait a bit so we don't get cached results. - time.sleep(2) - job.refresh() - - self.assertEqual(set(tags_to_replace), set(job.tags())) - def test_invalid_job_tags(self): """Test using job tags with an and operator.""" - self.assertRaises(IBMBackendValueError, self.sim_backend.run, self.bell, job_tags={"foo"}) + self.assertRaises(ValueError, self.sim_backend.run, self.bell, job_tags={"foo"}) self.assertRaises( - IBMBackendValueError, + ValueError, self.service.jobs, job_tags=[1, 2, 3], ) @@ -321,3 +163,69 @@ def test_cost_estimation(self): """Test cost estimation is returned correctly.""" self.assertTrue(self.sim_job.usage_estimation) self.assertIn("quantum_seconds", self.sim_job.usage_estimation) + + @skip("time_per_step supported in provider but not in runtime") + def test_time_per_step(self): + """Test retrieving time per step, while ensuring the date times are in local time.""" + # datetime, before running the job, in local time. 
+ start_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) - timedelta(seconds=1) + job = self.sim_backend.run(self.bell) + job.result() + # datetime, after the job is done running, in local time. + end_datetime = datetime.now().replace(tzinfo=tz.tzlocal()) + timedelta(seconds=1) + + self.assertTrue(job.time_per_step()) + for step, time_data in job.time_per_step().items(): + self.assertTrue( + (start_datetime <= time_data <= end_datetime), + 'job time step "{}={}" is not ' + "between the start date time {} and end date time {}".format( + step, time_data, start_datetime, end_datetime + ), + ) + + rjob = self.service.job(job.job_id()) + self.assertTrue(rjob.time_per_step()) + + @skip("queue_info supported in provider but not here") + def test_queue_info(self): + """Test retrieving queue information.""" + # Find the most busy backend. + backend = most_busy_backend(self.service) + leave_states = list(JOB_FINAL_STATES) + [JobStatus.RUNNING] + job = backend.run(self.bell) + queue_info = None + for _ in range(20): + queue_info = job.queue_info() + # Even if job status is queued, its queue info may not be immediately available. 
+ if ( + job._status is JobStatus.QUEUED and job.queue_position() is not None + ) or job._status in leave_states: + break + time.sleep(1) + + if job._status is JobStatus.QUEUED and job.queue_position() is not None: + self.log.debug( + "Job id=%s, queue info=%s, queue position=%s", + job.job_id(), + queue_info, + job.queue_position(), + ) + msg = "Job {} is queued but has no ".format(job.job_id()) + self.assertIsNotNone(queue_info, msg + "queue info.") + for attr, value in queue_info.__dict__.items(): + self.assertIsNotNone(value, msg + attr) + self.assertTrue( + all( + 0 < priority <= 1.0 + for priority in [ + queue_info.hub_priority, + queue_info.group_priority, + queue_info.project_priority, + ] + ), + "Unexpected queue info {} for job {}".format(queue_info, job.job_id()), + ) + + self.assertTrue(queue_info.format()) + self.assertTrue(repr(queue_info)) From e6e7a3bfa27f2054caf46afac1ab2318cee1835d Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Wed, 8 Nov 2023 14:31:42 -0500 Subject: [PATCH 15/22] Add warning if DE env is used (#1201) * add warning if de env * add reno --- qiskit_ibm_runtime/qiskit_runtime_service.py | 5 +++++ releasenotes/notes/de-warning-6cf474f11578339c.yaml | 6 ++++++ 2 files changed, 11 insertions(+) create mode 100644 releasenotes/notes/de-warning-6cf474f11578339c.yaml diff --git a/qiskit_ibm_runtime/qiskit_runtime_service.py b/qiskit_ibm_runtime/qiskit_runtime_service.py index 67a4e2e08..4187817bd 100644 --- a/qiskit_ibm_runtime/qiskit_runtime_service.py +++ b/qiskit_ibm_runtime/qiskit_runtime_service.py @@ -210,6 +210,11 @@ def __init__( auth_client = self._authenticate_ibm_quantum_account(self._client_params) # Update client parameters to use authenticated values. 
self._client_params.url = auth_client.current_service_urls()["services"]["runtime"] + if self._client_params.url == "https://api.de.quantum-computing.ibm.com/runtime": + warnings.warn( + "Features in versions of qiskit-ibm-runtime greater than 0.13.0 may not " + "be supported in this environment" + ) self._client_params.token = auth_client.current_access_token() self._api_client = RuntimeClient(self._client_params) self._hgps = self._initialize_hgps(auth_client) diff --git a/releasenotes/notes/de-warning-6cf474f11578339c.yaml b/releasenotes/notes/de-warning-6cf474f11578339c.yaml new file mode 100644 index 000000000..6248078b0 --- /dev/null +++ b/releasenotes/notes/de-warning-6cf474f11578339c.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + A warning will be raised at initialization if the DE environment is being used since not all + features are supported there. + From e9f93eccec8c08cace01b636aee209b3a82b9749 Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Wed, 8 Nov 2023 22:14:36 +0200 Subject: [PATCH 16/22] Remove caching of results in RuntimeJob (#1205) * Removed storing result in RuntimeJob._results. 
Instead retrieve results every time the results() method is called * Release note --------- Co-authored-by: kevin-tian --- qiskit_ibm_runtime/ibm_backend.py | 1 + qiskit_ibm_runtime/runtime_job.py | 32 ++++++++---------- qiskit_ibm_runtime/utils/utils.py | 5 +-- .../no_cached_results-54d063390b9b0ae6.yaml | 5 +++ test/integration/test_ibm_job.py | 7 ++-- test/integration/test_ibm_job_attributes.py | 33 ++++++++++++++----- 6 files changed, 51 insertions(+), 32 deletions(-) create mode 100644 releasenotes/notes/no_cached_results-54d063390b9b0ae6.yaml diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index d70c81683..3c472c0bf 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -787,6 +787,7 @@ def _runtime_run( program_id=program_id, session_id=session_id, service=self.service, + tags=job_tags, ) logger.debug("Job %s was successfully submitted.", job.job_id()) except TypeError as err: diff --git a/qiskit_ibm_runtime/runtime_job.py b/qiskit_ibm_runtime/runtime_job.py index 9e9a80356..80b77f3f5 100644 --- a/qiskit_ibm_runtime/runtime_job.py +++ b/qiskit_ibm_runtime/runtime_job.py @@ -122,7 +122,6 @@ def __init__( """ super().__init__(backend=backend, job_id=job_id) self._api_client = api_client - self._results: Optional[Any] = None self._interim_results: Optional[Any] = None self._params = params or {} self._creation_date = creation_date @@ -212,25 +211,22 @@ def result( # pylint: disable=arguments-differ RuntimeInvalidStateError: If the job was cancelled, and attempting to retrieve result. 
""" _decoder = decoder or self._final_result_decoder - if self._results is None or (_decoder != self._final_result_decoder): - self.wait_for_final_state(timeout=timeout) - if self._status == JobStatus.ERROR: - error_message = self._reason if self._reason else self._error_message - if self._reason == "RAN TOO LONG": - raise RuntimeJobMaxTimeoutError(error_message) - raise RuntimeJobFailureError(f"Unable to retrieve job result. {error_message}") - if self._status is JobStatus.CANCELLED: - raise RuntimeInvalidStateError( - "Unable to retrieve result for job {}. " - "Job was cancelled.".format(self.job_id()) - ) - - result_raw = self._download_external_result( - self._api_client.job_results(job_id=self.job_id()) + self.wait_for_final_state(timeout=timeout) + if self._status == JobStatus.ERROR: + error_message = self._reason if self._reason else self._error_message + if self._reason == "RAN TOO LONG": + raise RuntimeJobMaxTimeoutError(error_message) + raise RuntimeJobFailureError(f"Unable to retrieve job result. {error_message}") + if self._status is JobStatus.CANCELLED: + raise RuntimeInvalidStateError( + "Unable to retrieve result for job {}. " "Job was cancelled.".format(self.job_id()) ) - self._results = _decoder.decode(result_raw) if result_raw else None - return self._results + result_raw = self._download_external_result( + self._api_client.job_results(job_id=self.job_id()) + ) + + return _decoder.decode(result_raw) if result_raw else None def cancel(self) -> None: """Cancel the job. 
diff --git a/qiskit_ibm_runtime/utils/utils.py b/qiskit_ibm_runtime/utils/utils.py index 83f2aeb42..23831b2f6 100644 --- a/qiskit_ibm_runtime/utils/utils.py +++ b/qiskit_ibm_runtime/utils/utils.py @@ -27,6 +27,7 @@ IAMAuthenticator, ) from ibm_platform_services import ResourceControllerV2 # pylint: disable=import-error +from qiskit_ibm_runtime.exceptions import IBMInputValueError def validate_job_tags(job_tags: Optional[List[str]]) -> None: @@ -36,12 +37,12 @@ def validate_job_tags(job_tags: Optional[List[str]]) -> None: job_tags: Job tags to be validated. Raises: - ValueError: If the job tags are invalid. + IBMInputValueError: If the job tags are invalid. """ if job_tags and ( not isinstance(job_tags, list) or not all(isinstance(tag, str) for tag in job_tags) ): - raise ValueError("job_tags needs to be a list of strings.") + raise IBMInputValueError("job_tags needs to be a list of strings.") def get_iam_api_url(cloud_url: str) -> str: diff --git a/releasenotes/notes/no_cached_results-54d063390b9b0ae6.yaml b/releasenotes/notes/no_cached_results-54d063390b9b0ae6.yaml new file mode 100644 index 000000000..5740d00b6 --- /dev/null +++ b/releasenotes/notes/no_cached_results-54d063390b9b0ae6.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Removed storing result in ``RuntimeJob._results``. Instead retrieve results every time the + ``results()`` method is called. diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index 0ab89b85b..dfffc21e4 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -286,9 +286,8 @@ def test_retrieve_jobs_order(self): ) self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs]) - @skip("how do we support refresh") def test_refresh_job_result(self): - """Test re-retrieving job result via refresh.""" + """Test re-retrieving job result.""" result = self.sim_job.result() # Save original cached results. 
@@ -300,8 +299,8 @@ def test_refresh_job_result(self): self.assertNotEqual(cached_result, result.to_dict()) self.assertEqual(result.results[0].header.name, "modified_result") - # Re-retrieve result via refresh. - result = self.sim_job.result(refresh=True) + # Re-retrieve result. + result = self.sim_job.result() self.assertDictEqual(cached_result, result.to_dict()) self.assertNotEqual(result.results[0].header.name, "modified_result") diff --git a/test/integration/test_ibm_job_attributes.py b/test/integration/test_ibm_job_attributes.py index c04768c18..66d18b22e 100644 --- a/test/integration/test_ibm_job_attributes.py +++ b/test/integration/test_ibm_job_attributes.py @@ -23,11 +23,10 @@ from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES from qiskit.test.reference_circuits import ReferenceCircuits -from qiskit_ibm_provider.exceptions import ( - IBMBackendValueError, -) +from qiskit_ibm_provider.exceptions import IBMBackendValueError from qiskit_ibm_runtime import IBMBackend, RuntimeJob +from qiskit_ibm_runtime.exceptions import IBMInputValueError from ..decorators import ( IntegrationTestDependencies, integration_test_setup, @@ -88,7 +87,7 @@ def test_job_creation_date(self): ) def test_esp_readout_not_enabled(self): - """Test that an error is thrown is ESP readout is used and the backend does not support it.""" + """Test that an error is thrown if ESP readout is used and the backend does not support it.""" # sim backend does not have ``measure_esp_enabled`` flag: defaults to ``False`` with self.assertRaises(IBMBackendValueError) as context_manager: self.sim_backend.run(self.bell, use_measure_esp=True) @@ -147,14 +146,32 @@ def test_job_tags(self): len(rjobs), 1, "Expected job {}, got {}".format(job.job_id(), rjobs) ) self.assertEqual(rjobs[0].job_id(), job.job_id()) - # TODO check why this sometimes fails - # self.assertEqual(set(rjobs[0].tags()), set(job_tags)) + self.assertEqual(set(rjobs[0].tags), set(job_tags)) + + def test_job_tags_replace(self): 
+ """Test updating job tags by replacing a job's existing tags.""" + initial_job_tags = [uuid.uuid4().hex[:16]] + job = self.sim_backend.run(self.bell, job_tags=initial_job_tags) + + tags_to_replace_subtests = [ + [], # empty tags. + list("{}_new_tag_{}".format(uuid.uuid4().hex[:5], i) for i in range(2)), # unique tags. + initial_job_tags + ["foo"], + ] + for tags_to_replace in tags_to_replace_subtests: + with self.subTest(tags_to_replace=tags_to_replace): + # Update the job tags. + _ = job.update_tags(new_tags=tags_to_replace) + + # Wait a bit so we don't get cached results. + time.sleep(2) + self.assertEqual(set(tags_to_replace), set(job.tags)) def test_invalid_job_tags(self): """Test using job tags with an and operator.""" - self.assertRaises(ValueError, self.sim_backend.run, self.bell, job_tags={"foo"}) + self.assertRaises(IBMInputValueError, self.sim_backend.run, self.bell, job_tags={"foo"}) self.assertRaises( - ValueError, + IBMInputValueError, self.service.jobs, job_tags=[1, 2, 3], ) From 77196655d9c12ca390789676ee078199a3292f5c Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Wed, 8 Nov 2023 17:38:04 -0500 Subject: [PATCH 17/22] Update integration tests (#1208) --- test/integration/test_backend.py | 8 +- test/integration/test_ibm_job.py | 128 ++------------------ test/integration/test_ibm_qasm_simulator.py | 45 ++----- 3 files changed, 29 insertions(+), 152 deletions(-) diff --git a/test/integration/test_backend.py b/test/integration/test_backend.py index 605ab8bc2..7c726b492 100644 --- a/test/integration/test_backend.py +++ b/test/integration/test_backend.py @@ -82,7 +82,7 @@ def setUpClass(cls): super().setUpClass() if cls.dependencies.channel == "ibm_cloud": # TODO use real device when cloud supports it - cls.backend = cls.dependencies.service.least_busy(simulator=False, min_num_qubits=5) + cls.backend = cls.dependencies.service.least_busy(min_num_qubits=5) if cls.dependencies.channel == "ibm_quantum": cls.backend = cls.dependencies.service.least_busy( 
simulator=False, min_num_qubits=5, instance=cls.dependencies.instance @@ -202,11 +202,15 @@ def test_backend_deepcopy(self): def test_backend_pending_jobs(self): """Test pending jobs are returned.""" + if self.dependencies.channel == "ibm_cloud": + raise SkipTest("Cloud account does not have real backend.") backends = self.service.backends() self.assertTrue(any(backend.status().pending_jobs > 0 for backend in backends)) def test_backend_fetch_all_qubit_properties(self): """Check retrieving properties of all qubits""" + if self.dependencies.channel == "ibm_cloud": + raise SkipTest("Cloud channel does not have instance.") num_qubits = self.backend.num_qubits qubits = list(range(num_qubits)) qubit_properties = self.backend.qubit_properties(qubits) @@ -260,6 +264,8 @@ def test_retrieve_backend_not_exist(self): def test_too_many_qubits_in_circuit(self): """Check error message if circuit contains more qubits than supported on the backend.""" + if self.dependencies.channel == "ibm_cloud": + raise SkipTest("Cloud channel does not have instance.") num = len(self.backend.properties().qubits) num_qubits = num + 1 circuit = QuantumCircuit(num_qubits, num_qubits) diff --git a/test/integration/test_ibm_job.py b/test/integration/test_ibm_job.py index dfffc21e4..0c60c080a 100644 --- a/test/integration/test_ibm_job.py +++ b/test/integration/test_ibm_job.py @@ -14,7 +14,6 @@ import copy import time from datetime import datetime, timedelta -from threading import Thread, Event from unittest import SkipTest, mock from unittest import skip @@ -27,14 +26,9 @@ from qiskit_ibm_provider.api.rest.job import Job as RestJob from qiskit_ibm_provider.exceptions import IBMBackendApiError -from qiskit_ibm_runtime import IBMBackend, RuntimeJob from qiskit_ibm_runtime.api.exceptions import RequestsApiError from qiskit_ibm_runtime.exceptions import RuntimeJobTimeoutError, RuntimeJobNotFound -from ..decorators import ( - IntegrationTestDependencies, - integration_test_setup_with_backend, -) -from 
..fake_account_client import BaseFakeAccountClient, CancelableFakeJob + from ..ibm_test_case import IBMIntegrationTestCase from ..utils import ( most_busy_backend, @@ -46,23 +40,13 @@ class TestIBMJob(IBMIntegrationTestCase): """Test ibm_job module.""" - sim_backend: IBMBackend - real_device_backend: IBMBackend - bell = QuantumCircuit - sim_job: RuntimeJob - last_month: datetime - - @classmethod - @integration_test_setup_with_backend(simulator=False, min_num_qubits=2) - def setUpClass(cls, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: - """Initial class level setup.""" - # pylint: disable=arguments-differ - super().setUpClass(dependencies=dependencies) - cls.sim_backend = dependencies.service.backend("ibmq_qasm_simulator") - cls.real_device_backend = backend - cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend) - cls.sim_job = cls.sim_backend.run(cls.bell) - cls.last_month = datetime.now() - timedelta(days=30) + def setUp(self): + """Initial test setup.""" + super().setUp() + self.sim_backend = self.service.backend("ibmq_qasm_simulator") + self.bell = ReferenceCircuits.bell() + self.sim_job = self.sim_backend.run(self.bell) + self.last_month = datetime.now() - timedelta(days=30) def test_run_multiple_simulator(self): """Test running multiple jobs in a simulator.""" @@ -116,6 +100,8 @@ def test_run_multiple_simulator(self): def test_cancel(self): """Test job cancellation.""" + if self.dependencies.channel == "ibm_cloud": + raise SkipTest("Cloud account does not have real backend.") # Find the most busy backend backend = most_busy_backend(self.service) submit_and_cancel(backend, self.log) @@ -142,9 +128,7 @@ def test_retrieve_completed_jobs(self): def test_retrieve_pending_jobs(self): """Test retrieving jobs with the pending filter.""" - pending_job_list = self.service.jobs( - backend_name=self.sim_backend.name, limit=3, pending=True - ) + pending_job_list = self.service.jobs(program_id="sampler", limit=3, pending=True) for job 
in pending_job_list: self.assertTrue(job.status() in [JobStatus.QUEUED, JobStatus.RUNNING]) @@ -155,30 +139,6 @@ def test_retrieve_job(self): self.assertEqual(self.sim_job.inputs["circuits"], retrieved_job.inputs["circuits"]) self.assertEqual(self.sim_job.result().get_counts(), retrieved_job.result().get_counts()) - def test_retrieve_job_uses_appropriate_backend(self): - """Test that retrieved jobs come from their appropriate backend.""" - backend_1 = self.real_device_backend - # Get a second backend. - backend_2 = None - service = self.real_device_backend.service - for my_backend in service.backends(): - if my_backend.status().operational and my_backend.name != backend_1.name: - backend_2 = my_backend - break - if not backend_2: - raise SkipTest("Skipping test that requires multiple backends") - - job_1 = backend_1.run(transpile(ReferenceCircuits.bell())) - job_2 = backend_2.run(transpile(ReferenceCircuits.bell())) - - # test a retrieved job's backend is the same as the queried backend - self.assertEqual(service.job(job_1.job_id()).backend().name, backend_1.name) - self.assertEqual(service.job(job_2.job_id()).backend().name, backend_2.name) - - # Cleanup - for job in [job_1, job_2]: - cancel_job_safe(job, self.log) - def test_retrieve_job_error(self): """Test retrieving an invalid job.""" self.assertRaises(RuntimeJobNotFound, self.service.job, "BAD_JOB_ID") @@ -304,72 +264,10 @@ def test_refresh_job_result(self): self.assertDictEqual(cached_result, result.to_dict()) self.assertNotEqual(result.results[0].header.name, "modified_result") - @skip("TODO update test case") - def test_wait_for_final_state(self): - """Test waiting for job to reach final state.""" - - def final_state_callback(c_job_id, c_status, c_job, **kwargs): - """Job status query callback function.""" - self.assertEqual(c_job_id, job.job_id()) - self.assertNotIn(c_status, JOB_FINAL_STATES) - self.assertEqual(c_job.job_id(), job.job_id()) - self.assertIn("queue_info", kwargs) - - queue_info = 
kwargs.pop("queue_info", None) - callback_info["called"] = True - - if wait_time is None: - # Look for status change. - data = {"status": c_status, "queue_info": queue_info} - self.assertNotEqual(data, callback_info["last data"]) - callback_info["last data"] = data - else: - # Check called within wait time. - if callback_info["last call time"] and job._status not in JOB_FINAL_STATES: - self.assertAlmostEqual( - time.time() - callback_info["last call time"], - wait_time, - delta=0.2, - ) - callback_info["last call time"] = time.time() - - def job_canceller(job_, exit_event, wait): - exit_event.wait(wait) - cancel_job_safe(job_, self.log) - - wait_args = [2, None] - - saved_api = self.sim_backend._api_client - try: - self.sim_backend._api_client = BaseFakeAccountClient(job_class=CancelableFakeJob) - for wait_time in wait_args: - with self.subTest(wait_time=wait_time): - # Put callback data in a dictionary to make it mutable. - callback_info = { - "called": False, - "last call time": 0.0, - "last data": {}, - } - cancel_event = Event() - job = self.sim_backend.run(self.bell) - # Cancel the job after a while. - Thread(target=job_canceller, args=(job, cancel_event, 7), daemon=True).start() - try: - job.wait_for_final_state( - timeout=10, wait=wait_time, callback=final_state_callback - ) - self.assertTrue(job.in_final_state()) - self.assertTrue(callback_info["called"]) - cancel_event.set() - finally: - # Ensure all threads ended. 
- for thread in job._executor._threads: - thread.join(0.1) - finally: - self.sim_backend._api_client = saved_api - def test_wait_for_final_state_timeout(self): """Test waiting for job to reach final state times out.""" + if self.dependencies.channel == "ibm_cloud": + raise SkipTest("Cloud account does not have real backend.") backend = most_busy_backend(TestIBMJob.service) job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend)) try: diff --git a/test/integration/test_ibm_qasm_simulator.py b/test/integration/test_ibm_qasm_simulator.py index 1cb074c84..daf96f11b 100644 --- a/test/integration/test_ibm_qasm_simulator.py +++ b/test/integration/test_ibm_qasm_simulator.py @@ -17,41 +17,25 @@ from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister from qiskit.compiler import transpile -from qiskit.providers.aer.noise import ( # pylint: disable=import-error,no-name-in-module - NoiseModel, -) from qiskit.test.reference_circuits import ReferenceCircuits -from qiskit_ibm_runtime import IBMBackend -from ..decorators import ( - integration_test_setup_with_backend, - IntegrationTestDependencies, -) from ..ibm_test_case import IBMIntegrationTestCase class TestIBMQasmSimulator(IBMIntegrationTestCase): """Test IBM Quantum QASM Simulator.""" - @integration_test_setup_with_backend(simulator=False) - def setUp(self, backend: IBMBackend, dependencies: IntegrationTestDependencies) -> None: - """Initial test setup.""" - # pylint: disable=unused-argument - # pylint: disable=arguments-differ - super().setUp() - self.sim_backend = self.service.backend("ibmq_qasm_simulator") - self.real_device_backend = backend - def test_execute_one_circuit_simulator_online(self): """Test execute_one_circuit_simulator_online.""" + backend = self.service.get_backend("ibmq_qasm_simulator") quantum_register = QuantumRegister(1) classical_register = ClassicalRegister(1) quantum_circuit = QuantumCircuit(quantum_register, classical_register, name="qc") 
quantum_circuit.h(quantum_register[0]) quantum_circuit.measure(quantum_register[0], classical_register[0]) - circs = transpile(quantum_circuit, backend=self.sim_backend) + circs = transpile(quantum_circuit, backend=backend) shots = 1024 - job = self.sim_backend.run(circs, shots=shots) + job = backend.run(circs, shots=shots) result = job.result() counts = result.get_counts(quantum_circuit) target = {"0": shots / 2, "1": shots / 2} @@ -60,6 +44,7 @@ def test_execute_one_circuit_simulator_online(self): def test_execute_several_circuits_simulator_online(self): """Test execute_several_circuits_simulator_online.""" + backend = self.service.get_backend("ibmq_qasm_simulator") quantum_register = QuantumRegister(2) classical_register = ClassicalRegister(2) qcr1 = QuantumCircuit(quantum_register, classical_register, name="qc1") @@ -72,8 +57,8 @@ def test_execute_several_circuits_simulator_online(self): qcr2.measure(quantum_register[0], classical_register[0]) qcr2.measure(quantum_register[1], classical_register[1]) shots = 1024 - circs = transpile([qcr1, qcr2], backend=self.sim_backend) - job = self.sim_backend.run(circs, shots=shots) + circs = transpile([qcr1, qcr2], backend=backend) + job = backend.run(circs, shots=shots) result = job.result() counts1 = result.get_counts(qcr1) counts2 = result.get_counts(qcr2) @@ -85,6 +70,7 @@ def test_execute_several_circuits_simulator_online(self): def test_online_qasm_simulator_two_registers(self): """Test online_qasm_simulator_two_registers.""" + backend = self.service.get_backend("ibmq_qasm_simulator") qr1 = QuantumRegister(2) cr1 = ClassicalRegister(2) qr2 = QuantumRegister(2) @@ -101,8 +87,8 @@ def test_online_qasm_simulator_two_registers(self): qcr2.measure(qr1[1], cr1[1]) qcr2.measure(qr2[0], cr2[0]) qcr2.measure(qr2[1], cr2[1]) - circs = transpile([qcr1, qcr2], self.sim_backend) - job = self.sim_backend.run(circs, shots=1024) + circs = transpile([qcr1, qcr2], backend) + job = backend.run(circs, shots=1024) result = job.result() 
result1 = result.get_counts(qcr1) result2 = result.get_counts(qcr2) @@ -156,16 +142,3 @@ def _new_submit(qobj, *args, **kwargs): finally: backend._configuration._data["simulation_method"] = sim_method backend._submit_job = submit_fn - - # @skip( - # "NoiseModel.from_backend does not currently support V2 Backends. \ - # Skip test until it's fixed in aer." - # ) - def test_simulator_with_noise_model(self): - """Test using simulator with a noise model.""" - noise_model = NoiseModel.from_backend(self.real_device_backend) - result = self.sim_backend.run( - transpile(ReferenceCircuits.bell(), backend=self.sim_backend), - noise_model=noise_model, - ).result() - self.assertTrue(result) From e54a9fb0c095efc0a12aa9820103b40e1293e17d Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Wed, 8 Nov 2023 18:48:18 -0500 Subject: [PATCH 18/22] add "coords" to good_keys (#1209) --- test/integration/test_backend_serialization.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/test_backend_serialization.py b/test/integration/test_backend_serialization.py index f17b9c494..b97d81053 100644 --- a/test/integration/test_backend_serialization.py +++ b/test/integration/test_backend_serialization.py @@ -41,6 +41,7 @@ def test_backend_configuration(self, service): "backend_version", "rep_delay_range", "processor_type.revision", + "coords", ) good_keys_prefixes = ("channels",) From 11965560fd126b8dcea5588a45f55b9fab7b4479 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Thu, 9 Nov 2023 12:22:47 -0500 Subject: [PATCH 19/22] Add channel strategy instance validation (#1193) * q-ctrl instance validation * add reno * Update releasenotes/notes/channel-strategy-instance-validate-639b18b8c5d44678.yaml Co-authored-by: merav-aharoni --------- Co-authored-by: merav-aharoni --- qiskit_ibm_runtime/api/clients/runtime.py | 8 ++++++++ qiskit_ibm_runtime/api/rest/runtime.py | 10 ++++++++++ qiskit_ibm_runtime/qiskit_runtime_service.py | 13 +++++++++++++ 
...strategy-instance-validate-639b18b8c5d44678.yaml | 5 +++++ 4 files changed, 36 insertions(+) create mode 100644 releasenotes/notes/channel-strategy-instance-validate-639b18b8c5d44678.yaml diff --git a/qiskit_ibm_runtime/api/clients/runtime.py b/qiskit_ibm_runtime/api/clients/runtime.py index 9222248e7..f59708cff 100644 --- a/qiskit_ibm_runtime/api/clients/runtime.py +++ b/qiskit_ibm_runtime/api/clients/runtime.py @@ -374,6 +374,14 @@ def list_backends( """ return self._api.backends(hgp=hgp, channel_strategy=channel_strategy)["devices"] + def cloud_instance(self) -> bool: + """Returns a boolean of whether or not the instance has q-ctrl enabled. + + Returns: + Boolean value. + """ + return self._api.cloud_instance() + def backend_configuration(self, backend_name: str) -> Dict[str, Any]: """Return the configuration of the IBM backend. diff --git a/qiskit_ibm_runtime/api/rest/runtime.py b/qiskit_ibm_runtime/api/rest/runtime.py index df264c7a2..856a677f9 100644 --- a/qiskit_ibm_runtime/api/rest/runtime.py +++ b/qiskit_ibm_runtime/api/rest/runtime.py @@ -36,6 +36,7 @@ class Runtime(RestAdapterBase): "programs": "/programs", "jobs": "/jobs", "backends": "/backends", + "cloud_instance": "/instance", } def program(self, program_id: str) -> "Program": @@ -293,3 +294,12 @@ def backends( if channel_strategy: params["channel_strategy"] = channel_strategy return self.session.get(url, params=params, timeout=timeout).json() + + def cloud_instance(self) -> bool: + """Return boolean of whether or not the instance has q-ctrl enabled. + + Returns: + Boolean value. 
+ """ + url = self.get_url("cloud_instance") + return self.session.get(url).json().get("qctrl_enabled") diff --git a/qiskit_ibm_runtime/qiskit_runtime_service.py b/qiskit_ibm_runtime/qiskit_runtime_service.py index 4187817bd..b0d1ad3c9 100644 --- a/qiskit_ibm_runtime/qiskit_runtime_service.py +++ b/qiskit_ibm_runtime/qiskit_runtime_service.py @@ -205,6 +205,7 @@ def __init__( # TODO: We can make the backend discovery lazy self._backends = self._discover_cloud_backends() QiskitRuntimeService.global_service = self + self._validate_channel_strategy() return else: auth_client = self._authenticate_ibm_quantum_account(self._client_params) @@ -314,6 +315,18 @@ def _discover_account( return account + def _validate_channel_strategy(self) -> None: + """Raise an error if the passed in channel_strategy and + instance do not match. + + """ + if self._channel_strategy == "q-ctrl": + qctrl_enabled = self._api_client.cloud_instance() + if not qctrl_enabled: + raise IBMNotAuthorizedError( + "This account is not authorized to use ``q-ctrl`` as a channel strategy." + ) + def _discover_cloud_backends(self) -> Dict[str, "ibm_backend.IBMBackend"]: """Return the remote backends available for this service instance. diff --git a/releasenotes/notes/channel-strategy-instance-validate-639b18b8c5d44678.yaml b/releasenotes/notes/channel-strategy-instance-validate-639b18b8c5d44678.yaml new file mode 100644 index 000000000..6c74e2fb6 --- /dev/null +++ b/releasenotes/notes/channel-strategy-instance-validate-639b18b8c5d44678.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + An error will be raised during initialization if ``q-ctrl`` is passed in as the ``channel_strategy`` and + the account instance does not have ``q-ctrl`` enabled. 
From b146fac4d9854d9f2ae0b9d9679f858db222e490 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Fri, 10 Nov 2023 15:14:31 -0500 Subject: [PATCH 20/22] Patch warning wording (#1212) --- qiskit_ibm_runtime/qiskit_runtime_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qiskit_ibm_runtime/qiskit_runtime_service.py b/qiskit_ibm_runtime/qiskit_runtime_service.py index b0d1ad3c9..6ef274fad 100644 --- a/qiskit_ibm_runtime/qiskit_runtime_service.py +++ b/qiskit_ibm_runtime/qiskit_runtime_service.py @@ -213,8 +213,8 @@ def __init__( self._client_params.url = auth_client.current_service_urls()["services"]["runtime"] if self._client_params.url == "https://api.de.quantum-computing.ibm.com/runtime": warnings.warn( - "Features in versions of qiskit-ibm-runtime greater than 0.13.0 may not " - "be supported in this environment" + "Features in versions of qiskit-ibm-runtime greater than and including " + "0.13.0 may not be supported in this environment" ) self._client_params.token = auth_client.current_access_token() self._api_client = RuntimeClient(self._client_params) From fd00791f365f3dc84b8f5999a4c095f794777b50 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Fri, 10 Nov 2023 17:14:37 -0500 Subject: [PATCH 21/22] Remove circuit & observable indicies run inputs (#1211) --- qiskit_ibm_runtime/estimator.py | 2 -- qiskit_ibm_runtime/sampler.py | 1 - .../notes/remove-circuit-indicies-e8af9da213e463e9.yaml | 6 ++++++ 3 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/remove-circuit-indicies-e8af9da213e463e9.yaml diff --git a/qiskit_ibm_runtime/estimator.py b/qiskit_ibm_runtime/estimator.py index 73e20b903..1e4160332 100644 --- a/qiskit_ibm_runtime/estimator.py +++ b/qiskit_ibm_runtime/estimator.py @@ -176,9 +176,7 @@ def _run( # pylint: disable=arguments-differ """ inputs = { "circuits": circuits, - "circuit_indices": list(range(len(circuits))), "observables": observables, - "observable_indices": 
list(range(len(observables))), "parameters": [circ.parameters for circ in circuits], "parameter_values": parameter_values, } diff --git a/qiskit_ibm_runtime/sampler.py b/qiskit_ibm_runtime/sampler.py index bfb7cc2b4..e96554974 100644 --- a/qiskit_ibm_runtime/sampler.py +++ b/qiskit_ibm_runtime/sampler.py @@ -143,7 +143,6 @@ def _run( # pylint: disable=arguments-differ inputs = { "circuits": circuits, "parameters": [circ.parameters for circ in circuits], - "circuit_indices": list(range(len(circuits))), "parameter_values": parameter_values, } return self._run_primitive( diff --git a/releasenotes/notes/remove-circuit-indicies-e8af9da213e463e9.yaml b/releasenotes/notes/remove-circuit-indicies-e8af9da213e463e9.yaml new file mode 100644 index 000000000..ca2d327fd --- /dev/null +++ b/releasenotes/notes/remove-circuit-indicies-e8af9da213e463e9.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + The ``circuit_indices`` and ``observable_indices`` run inputs for + :class:`~qiskit_ibm_runtime.Estimator` and :class:`~qiskit_ibm_runtime.Sampler` + have been completely removed. \ No newline at end of file From ac4efe2102ce0054e06d5e48c7e461ab080fb8fa Mon Sep 17 00:00:00 2001 From: merav-aharoni Date: Mon, 13 Nov 2023 19:10:25 +0200 Subject: [PATCH 22/22] Add method close_session for IBMBackend (#1213) * Added IBMBackend.close_session method * Added test --- qiskit_ibm_runtime/ibm_backend.py | 10 ++++++++++ test/integration/test_session.py | 8 ++++++++ 2 files changed, 18 insertions(+) diff --git a/qiskit_ibm_runtime/ibm_backend.py b/qiskit_ibm_runtime/ibm_backend.py index 3c472c0bf..b90f5789b 100644 --- a/qiskit_ibm_runtime/ibm_backend.py +++ b/qiskit_ibm_runtime/ibm_backend.py @@ -840,6 +840,16 @@ def cancel_session(self) -> None: self._session = None + def close_session(self) -> None: + """Close the session so new jobs will no longer be accepted, but existing + queued or running jobs will run to completion. 
The session will be terminated once there + are no more pending jobs.""" + if self._session: + self._session.cancel() + if self._session.session_id: + self._api_client.close_session(self._session.session_id) + self._session = None + class IBMRetiredBackend(IBMBackend): """Backend class interfacing with an IBM Quantum device no longer available.""" diff --git a/test/integration/test_session.py b/test/integration/test_session.py index 6b7d77c5d..28238a6a6 100644 --- a/test/integration/test_session.py +++ b/test/integration/test_session.py @@ -135,6 +135,14 @@ def test_session_cancel(self): backend.cancel_session() self.assertIsNone(backend.session) + def test_session_close(self): + """Test closing a session""" + backend = self.service.backend("ibmq_qasm_simulator") + backend.open_session() + self.assertTrue(backend.session.active) + backend.close_session() + self.assertIsNone(backend.session) + def test_run_after_cancel(self): """Test running after session is cancelled.""" backend = self.service.backend("ibmq_qasm_simulator")