diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 4d0350056f3..3c2d7148f90 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -6,6 +6,7 @@ on: - master - 'feature/**' - '*-lcm' + merge_group: jobs: ocaml-format: diff --git a/.github/workflows/generate-and-build-sdks.yml b/.github/workflows/generate-and-build-sdks.yml index 53ada2588da..9c263900f77 100644 --- a/.github/workflows/generate-and-build-sdks.yml +++ b/.github/workflows/generate-and-build-sdks.yml @@ -55,6 +55,12 @@ jobs: _build/install/default/xapi/sdk/go/* !_build/install/default/xapi/sdk/go/dune + - name: Store Java SDK source + uses: actions/upload-artifact@v4 + with: + name: SDK_Source_Java + path: _build/install/default/xapi/sdk/java/* + - name: Trim dune cache run: opam exec -- dune cache trim --size=2GiB @@ -84,6 +90,39 @@ jobs: source/* !source/src/*.o + build-java-sdk: + name: Build Java SDK + runs-on: ubuntu-latest + needs: generate-sdk-sources + steps: + - name: Install dependencies + run: sudo apt-get install maven + + - name: Retrieve Java SDK source + uses: actions/download-artifact@v4 + with: + name: SDK_Source_Java + path: source/ + + - name: Set up JDK 17 + uses: actions/setup-java@v4 + with: + java-version: '17' + distribution: 'temurin' + + - name: Build Java SDK + shell: bash + run: | + xapi_version="${{ inputs.xapi_version }}" + xapi_version="${xapi_version//v/}" + mkdir -p target && mvn -f source/xen-api/pom.xml -B -Drevision=$xapi_version-prerelease clean package && mv source/xen-api/target/*.jar target/ + + - name: Store Java SDK + uses: actions/upload-artifact@v4 + with: + name: SDK_Artifacts_Java + path: target/* + build-csharp-sdk: name: Build C# SDK runs-on: windows-2022 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d4bf28aaab2..580b27f6288 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -6,9 +6,10 @@ on: schedule: # run daily, this refreshes the cache - cron: "13 2 * * *" + merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + group: ${{ github.workflow }}-${{github.event_name}}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: @@ -45,7 +46,6 @@ jobs: - name: Make install smoketest run: | opam exec -- make install DESTDIR=$(mktemp -d) - opam exec -- make install DESTDIR=$(mktemp -d) BUILD_PY2=NO - name: Check disk space run: df -h || true diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 3ea9bd1498c..d6ad9c849a6 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -6,9 +6,10 @@ on: schedule: # run daily, this refreshes the cache - cron: "13 2 * * *" + merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + group: ${{ github.workflow }}-${{github.event_name}}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: @@ -18,7 +19,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["2.7", "3.11"] + python-version: ["3.11"] steps: - name: Checkout code uses: actions/checkout@v4 @@ -39,48 +40,11 @@ jobs: - uses: pre-commit/action@v3.0.1 name: Run pre-commit checks (no spaces at end of lines, etc) - if: ${{ matrix.python-version != '2.7' }} with: extra_args: --all-files --verbose --hook-stage commit env: SKIP: 
no-commit-to-branch - - name: Install dependencies only needed for python 2 - if: ${{ matrix.python-version == '2.7' }} - run: pip install enum - - - name: Install dependencies only needed for python 3 - if: ${{ matrix.python-version != '2.7' }} - run: pip install opentelemetry-api opentelemetry-exporter-zipkin-json opentelemetry-sdk pandas pytype toml wrapt - - - name: Install common dependencies for Python ${{matrix.python-version}} - run: pip install future mock pytest-coverage pytest-mock - - - name: Run Pytest for python 2 and get code coverage - if: ${{ matrix.python-version == '2.7' }} - run: > - pytest - --cov=scripts --cov=ocaml/xcp-rrdd - scripts/ ocaml/xcp-rrdd -vv -rA - --junitxml=.git/pytest${{matrix.python-version}}.xml - --cov-report term-missing - --cov-report xml:.git/coverage${{matrix.python-version}}.xml - env: - PYTHONDEVMODE: yes - - - name: Run Pytest for python 3 and get code coverage - if: ${{ matrix.python-version != '2.7' }} - run: > - pytest - --cov=scripts --cov=ocaml/xcp-rrdd --cov=python3/ - scripts/ ocaml/xcp-rrdd python3/ -vv -rA - --junitxml=.git/pytest${{matrix.python-version}}.xml - --cov-report term-missing - --cov-report xml:.git/coverage${{matrix.python-version}}.xml - env: - PYTHONDEVMODE: yes - PYTHONPATH: "python3:python3/tests/stubs" - - name: Upload coverage report to Coveralls uses: coverallsapp/github-action@v2 with: @@ -90,7 +54,6 @@ jobs: parallel: true - uses: dciborow/action-pylint@0.1.0 - if: ${{ matrix.python-version != '2.7' }} with: reporter: github-pr-review level: warning @@ -99,8 +62,7 @@ jobs: continue-on-error: true - name: Run pytype checks - if: ${{ matrix.python-version != '2.7' }} - run: ./pytype_reporter.py + run: pip install pandas pytype toml && ./pytype_reporter.py env: PR_NUMBER: ${{ github.event.number }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9a051ef15f9..919cf406127 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,7 +33,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: XenAPI - path: scripts/examples/python/dist/ + path: python3/examples/dist/ build-sdks: name: Build and upload SDK artifacts @@ -64,6 +64,12 @@ jobs: name: SDK_Artifacts_C path: libxenserver/usr/local/ + - name: Retrieve Java SDK distribution artifacts + uses: actions/download-artifact@v4 + with: + name: SDK_Artifacts_Java + path: dist/ + - name: Retrieve C# SDK distribution artifacts uses: actions/download-artifact@v4 with: diff --git a/.github/workflows/setup-xapi-environment/action.yml b/.github/workflows/setup-xapi-environment/action.yml index 541510bb8f8..8381e31117b 100644 --- a/.github/workflows/setup-xapi-environment/action.yml +++ b/.github/workflows/setup-xapi-environment/action.yml @@ -28,9 +28,9 @@ runs: shell: bash run: sudo apt-get update - - name: Install python2 + - name: Install python3 shell: bash - run: sudo apt-get install python2 + run: sudo apt-get install python3 - name: Use disk with more space for TMPDIR and XDG_CACHE_HOME shell: bash diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index c17568d821c..b078eaba549 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -2,9 +2,10 @@ name: ShellCheck on: pull_request: + merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: - group: sc-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + group: sc-${{ github.workflow 
}}-${{github.event_name}}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: diff --git a/.gitignore b/.gitignore index 967e463c15f..b519eb9cb39 100644 --- a/.gitignore +++ b/.gitignore @@ -16,10 +16,10 @@ config.mk # python packaging **/__pycache__/ **/*.pyc -scripts/examples/python/setup.py -scripts/examples/python/XenAPI.egg-info/ -scripts/examples/python/build/ -scripts/examples/python/dist/ +python3/examples/setup.py +python3/examples/XenAPI.egg-info/ +python3/examples/build/ +python3/examples/dist/ # ignore file needed for building the SDK ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml diff --git a/.ocamlformat b/.ocamlformat index f86522707f6..77d9adfc386 100644 --- a/.ocamlformat +++ b/.ocamlformat @@ -7,3 +7,4 @@ break-separators=before break-infix=fit-or-vertical break-infix-before-func=false sequence-blank-line=preserve-one +ocaml-version=4.14 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0ca5ef37fee..e8fb2f37e0e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,6 +14,8 @@ # pre-commit run -av --hook-stage pre-push # default_stages: [commit, push] +default_language_version: + python: python3.11 repos: # Recommendation for a minimal git pre-commit hook: # https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md: @@ -29,6 +31,95 @@ repos: - id: check-executables-have-shebangs exclude: ocaml + +# Improve Python formatting incrementally: +# https://dev.to/akaihola/improving-python-code-incrementally-3f7a +# +# darker checks if staged python changes are formatted according to +# the PEP8-aligned black formatter. It also checks if the imports are sorted. +# +# It is a good idea to run this before committing, and it is also run in the +# GitHub Workflow. +# +# Note: darker only checks the changes in files ending in .py! +# Python scripts that don't end in .py should be renamed to have the .py extension +# when moving them to python3/bin. +# (remove the .py extension in the Makefile when installing the file) +# +- repo: https://github.com/akaihola/darker + rev: 1.7.3 + hooks: + - id: darker + files: python3/ + name: check changes in Python3 tree using darker and isort + args: [--diff, --skip-string-normalization, --isort, -tpy36] + additional_dependencies: [isort] + +# +# Run pytest and diff-cover to check that the new /python3 test suite passes. +# This hook uses a local venv containing the required dependencies. When adding +# new dependencies, they should be added to the additional_dependencies below.
+# +- repo: local + hooks: + - id: pytest + files: python3/ + name: check that the Python3 test suite passes + entry: env PYTHONDEVMODE=yes sh -c 'coverage run && coverage xml && + coverage html && coverage report && + diff-cover --ignore-whitespace --compare-branch=origin/master + --show-uncovered --html-report .git/coverage-diff.html + --fail-under 50 .git/coverage3.11.xml' + require_serial: true + pass_filenames: false + language: python + types: [python] + additional_dependencies: + - coverage + - diff-cover + - future + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pytest-mock + - mock + - wrapt + - XenAPI + + +- repo: https://github.com/RobertCraigie/pyright-python + rev: v1.1.372 + hooks: + - id: pyright + name: check that python3 tree passes pyright/VSCode check + files: python3/ + additional_dependencies: + - mock + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pytest + - pyudev + - XenAPI + + +# Check that pylint passes for the changes in new /python3 code. +- repo: local + hooks: + - id: pylint + files: python3/ + stages: [push] + name: check that changes to python3 tree pass pylint + entry: diff-quality --violations=pylint + --ignore-whitespace --compare-branch=origin/master + pass_filenames: false + language: python + types: [python] + additional_dependencies: [diff-cover, pylint, pytest] + + +# pre-push hook (it only runs if you install pre-commit as a pre-push hook): +# It can be manually tested using: `pre-commit run -av --hook-stage push` # Recommendation for a minimal git pre-push hook: # While using pre-commit yields great results, it # is "not fast". Therefore only run it pre-push, @@ -53,4 +144,12 @@ repos: # developers have such version installed, it can be configured here: # language_version: python3.11 require_serial: true - additional_dependencies: [pandas, pytype] + additional_dependencies: + - future + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pandas + - pytest + - pytype + files: python3/ diff --git a/Makefile b/Makefile index efa6394047f..337e4dad88c 100644 --- a/Makefile +++ b/Makefile @@ -68,9 +68,7 @@ test: trap "kill $${PSTREE_SLEEP_PID}" INT TERM EXIT; \ timeout --foreground $(TEST_TIMEOUT2) \ dune runtest --profile=$(PROFILE) --error-reporting=twice -j $(JOBS) -ifneq ($(PY_TEST), NO) dune build @runtest-python --profile=$(PROFILE) -endif stresstest: dune build @stresstest --profile=$(PROFILE) --no-buffer -j $(JOBS) @@ -123,7 +121,7 @@ sdk: cp -r _build/default/ocaml/sdk-gen/java/autogen/* $(XAPISDK)/java cp -r _build/default/ocaml/sdk-gen/powershell/autogen/* $(XAPISDK)/powershell cp -r _build/default/ocaml/sdk-gen/go/autogen/* $(XAPISDK)/go - cp scripts/examples/python/XenAPI/XenAPI.py $(XAPISDK)/python + cp python3/examples/XenAPI/XenAPI.py $(XAPISDK)/python sh ocaml/sdk-gen/windows-line-endings.sh $(XAPISDK)/csharp sh ocaml/sdk-gen/windows-line-endings.sh $(XAPISDK)/powershell @@ -138,7 +136,7 @@ sdk-build-java: sdk cd _build/install/default/xapi/sdk/java && mvn -f xen-api/pom.xml -B clean package install -Drevision=0.0 python: - $(MAKE) -C scripts/examples/python build + $(MAKE) -C python3/examples build doc-json: dune exec --profile=$(PROFILE) -- ocaml/idl/json_backend/gen_json.exe -destdir $(XAPIDOC)/jekyll @@ -239,7 +237,6 @@ install: build doc sdk doc-json install -D ./ocaml/xenopsd/scripts/block $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/block install -D ./ocaml/xenopsd/scripts/xen-backend.rules
$(DESTDIR)/$(ETCDIR)/udev/rules.d/xen-backend.rules install -D ./ocaml/xenopsd/scripts/tap $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/tap - install -D ./ocaml/xenopsd/scripts/qemu-vif-script $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/qemu-vif-script install -D ./ocaml/xenopsd/scripts/setup-vif-rules $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/setup-vif-rules install -D ./_build/install/default/bin/pvs-proxy-ovs-setup $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/pvs-proxy-ovs-setup (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln -s pvs-proxy-ovs-setup setup-pvs-proxy-rules) diff --git a/clock.opam b/clock.opam index 45b4fd162c2..73192316295 100644 --- a/clock.opam +++ b/clock.opam @@ -13,6 +13,7 @@ depends: [ "astring" "mtime" "ptime" + "xapi-log" {= version} "qcheck-core" {with-test} "qcheck-alcotest" {with-test} "odoc" {with-doc} diff --git a/configure.ml b/configure.ml index e5c37d55fbc..91eee0f0ed7 100644 --- a/configure.ml +++ b/configure.ml @@ -57,6 +57,8 @@ let args = ; flag "yumplugindir" ~doc:"DIR YUM plugins" ~default:"/usr/lib/yum-plugins" ; flag "yumpluginconfdir" ~doc:"DIR YUM plugins conf dir" ~default:"/etc/yum/pluginconf.d" + ; flag "xapi_api_version_major" ~doc:"xapi api major version" ~default:"2" + ; flag "xapi_api_version_minor" ~doc:"xapi api minor version" ~default:"21" ] |> Arg.align @@ -84,7 +86,7 @@ let () = in List.iter print_endline lines ; (* Expand @LIBEXEC@ in udev rules *) - match Hashtbl.find_opt config "XENOPSD_LIBEXECDIR" with + ( match Hashtbl.find_opt config "XENOPSD_LIBEXECDIR" with | Some xenopsd_libexecdir -> expand "@LIBEXEC@" xenopsd_libexecdir "ocaml/xenopsd/scripts/vif.in" "ocaml/xenopsd/scripts/vif" ; @@ -93,3 +95,17 @@ let () = "ocaml/xenopsd/scripts/xen-backend.rules" | None -> failwith "xenopsd_libexecdir not set" + ) ; + + match + ( Hashtbl.find_opt config "XAPI_API_VERSION_MAJOR" + , Hashtbl.find_opt config "XAPI_API_VERSION_MINOR" + ) + with + | Some xapi_api_version_major, Some xapi_api_version_minor -> + expand "@APIVERMAJ@" xapi_api_version_major "ocaml/idl/api_version.ml.in" + "ocaml/idl/api_version.ml.in2" ; + expand "@APIVERMIN@" xapi_api_version_minor "ocaml/idl/api_version.ml.in2" + "ocaml/idl/api_version.ml" + | _, _ -> + failwith "xapi_api_version_major or xapi_api_version_minor not set" diff --git a/doc/content/_index.md b/doc/content/_index.md index 549b2152ed6..20a6f95a521 100644 --- a/doc/content/_index.md +++ b/doc/content/_index.md @@ -8,7 +8,7 @@ The **XAPI Toolstack**: - Forms the control plane of both [XenServer](http://xenserver.com) as well as [xcp-ng](http://xcp-ng.org), - manages clusters of Xen hosts with shared storage and networking, -- has a full-featured [API](http://xapi-project-github.io/xen-api), used by clients such as +- has a full-featured [API](http://xapi-project.github.io/xen-api), used by clients such as [XenCenter](https://github.com/xenserver/xenadmin) and [Xen Orchestra](https://xen-orchestra.com). The XAPI Toolstack is an open-source project developed by the [xapi diff --git a/doc/content/python/_index.md b/doc/content/python/_index.md new file mode 100644 index 00000000000..773f02ce38c --- /dev/null +++ b/doc/content/python/_index.md @@ -0,0 +1,129 @@ +--- +title: "Python" +--- + +Introduction +------------ + +Most Python3 scripts and plugins shall be located below the `python3` directory. 
+The structure of the directory is as follows: + +- `python3/bin`: This contains files installed in `/opt/xensource/bin`; + they are meant to be run by users +- `python3/libexec`: This contains files installed in `/opt/xensource/libexec`; + they are meant to be run only by `xapi` and other daemons. +- `python3/packages`: Contains files to be installed in python's `site-packages`; + they are meant to be modules and packages to be imported by other scripts + or executed via `python3 -m` +- `python3/plugins`: This contains files that + are meant to be `xapi` plugins +- `python3/tests`: Tests for testing and covering the Python scripts and plugins + +Dependencies for development and testing +---------------------------------------- + +In GitHub CI and local testing, we can use [pre-commit] to execute the tests. +It provides a dedicated, clearly defined and always consistent Python environment. +The easiest way to run all tests and checks is to simply run [pre-commit]. +The example commands below assume that you have Python3 in your PATH. +Currently, Python 3.11 is required for it: + +```bash { title="Installing and running pre-commit" } +pip3 install pre-commit +pre-commit run -av +# Or, to just run the pytest hook: +pre-commit run -av pytest +``` + +> Note: By default, CentOS 8 provides Python 3.6, whereas some tests need Python >= 3.7 + +Alternatively, you can of course run the tests in any suitable environment, +given that you install the supported versions of all dependencies. +You can find the dependencies in the list [additional_dependencies] of the [pytest] hook +in the [pre-commit] configuration file [.pre-commit-config.yaml]. +{{% expand title= +"Example `pytest` hook from `.pre-commit-config.yaml` (expand)" %}} + +```yaml + hooks: + - id: pytest + files: python3/ + name: check that the Python3 test suite passes + entry: sh -c 'coverage run && coverage xml && + coverage html && coverage report && + diff-cover --ignore-whitespace --compare-branch=origin/master + --show-uncovered --html-report .git/coverage-diff.html + --fail-under 50 .git/coverage3.11.xml' + require_serial: true + pass_filenames: false + language: python + types: [python] + additional_dependencies: + - coverage + - diff-cover + - future + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pytest-mock + - mock + - wrapt + - XenAPI +``` + +{{% /expand %}} + +Coverage +-------- + +Code moved to the python3 directory tree shall have good code coverage, with +tests that are executed, verified and covered using [pytest] and [Coverage.py]. +The `coverage` tool and [pytest] are configured in `pyproject.toml`, and +`coverage run` is configured to run [pytest] by default. + +`coverage run` collects coverage from the run and stores it in its database. +The simplest command line to run and report coverage to stdout is: +`coverage run && coverage report` + +{{% expand title="Other commands also used in the pytest hook example above (expand)" %}} + +- `coverage xml`: Generates an XML report from the coverage database to + `.git/coverage3.11.xml`. It is needed for uploading the coverage report to Coveralls. +- `coverage html`: Generates an HTML report from the coverage database to + `.git/coverage_html/` +{{% /expand %}} + +We configure the file paths used for the generated database and other coverage +configuration in the sections `[tool.coverage.run]` and `[tool.coverage.report]` +of `pyproject.toml`.
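+As a concrete illustration, the sketch below shows a minimal, self-contained
+test of the kind that `coverage run` (which is configured to invoke [pytest])
+picks up. The file name and the helper in it are hypothetical, for
+illustration only; real helpers live in the modules under `python3/`:
+
+```python { title="Hypothetical example: python3/tests/test_example.py" }
+import pytest
+
+
+def parse_bool(value):
+    """Stand-in helper, defined inline so the example is self-contained."""
+    if value not in ("true", "false"):
+        raise ValueError(value)
+    return value == "true"
+
+
+@pytest.mark.parametrize("value,expected", [("true", True), ("false", False)])
+def test_parse_bool(value, expected):
+    assert parse_bool(value) is expected
+```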
+Pytest +------ + +If your Python environment has the [dependencies for the tests] installed, you +can run [pytest] in this environment without any arguments to use the defaults. + +{{% expand title="For development, pytest can also run just one test (expand)" %}} + +To run a specific test, run pytest and pass the test file to it (example): + +```bash { title="Example for running only one specific test" } +pytest python3/tests/test_perfmon.py +``` + +```bash { title="Running only one test and reporting the code coverage of it" } +coverage run -m pytest python3/tests/test_perfmon.py && coverage report +``` + +{{% /expand %}} + +[coverage.py]: https://coverage.readthedocs.io +"coverage.py is the coverage collector for Python" +[dependencies for the tests]: #dependencies-for-development-and-testing +"Installation of the dependencies for development and testing" +[pytest]: https://docs.pytest.org "Pytest documentation" +[pre-commit]: https://pre-commit.com "pre-commit commit hook framework" +[.pre-commit-config.yaml]: https://pre-commit.com/#adding-pre-commit-plugins-to-your-project +"project-specific configuration file of pre-commit, found in the project's top directory" +[additional_dependencies]: https://pre-commit.com/#pre-commit-configyaml---hooks +"dependencies that will be installed in the environment where this hook gets to run" diff --git a/doc/content/toolstack/features/HA/index.md b/doc/content/toolstack/features/HA/index.md index 45918ac9269..b0db7feac5d 100644 --- a/doc/content/toolstack/features/HA/index.md +++ b/doc/content/toolstack/features/HA/index.md @@ -531,7 +531,7 @@ type ('a, 'b) configuration = { Note that: - the memory required by the VMs listed in `placement` has already been - substracted from the total memory of the hosts; it doesn't need to be + subtracted from the free memory of the hosts; it doesn't need to be subtracted again. - the free memory of each host has already had per-host miscellaneous overheads subtracted from it, including that used by unprotected VMs, @@ -551,10 +551,10 @@ sig end ``` -The function `get_specific_plan` takes a configuration and a list of Hosts -which have failed. It returns a VM restart plan represented as a VM to Host -association list. This is the function called by the -background HA VM restart thread on the master. +The function `get_specific_plan` takes a configuration and a list of VMs +(whose resident hosts have failed). It returns a VM restart +plan represented as a VM to Host association list. This is the function +called by the background HA VM restart thread on the master. The function `plan_always_possible` returns true if every sequence of Host failures of length diff --git a/doc/content/xapi/storage/_index.md b/doc/content/xapi/storage/_index.md index c265353869a..009ceabd4bd 100644 --- a/doc/content/xapi/storage/_index.md +++ b/doc/content/xapi/storage/_index.md @@ -245,7 +245,7 @@ From this interface we generate and appear in the` _build/default/python/xapi/storage/api/v5` directory.
- On a XenServer host, they are stored in the - `/usr/lib/python2.7/site-packages/xapi/storage/api/v5/` + `/usr/lib/python3.6/site-packages/xapi/storage/api/v5/` directory ### SMAPIv3 Plugins diff --git a/doc/content/xapi/storage/sxm.md b/doc/content/xapi/storage/sxm.md index 8429f87321c..ee3b90276cc 100644 --- a/doc/content/xapi/storage/sxm.md +++ b/doc/content/xapi/storage/sxm.md @@ -450,8 +450,8 @@ but we've still got a bit of thinking to do: we sort the VDIs to copy based on a let compare_fun v1 v2 = let r = Int64.compare v1.size v2.size in if r = 0 then - let t1 = Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) in - let t2 = Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) in + let t1 = Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) in + let t2 = Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) in compare t1 t2 else r in let all_vdis = all_vdis |> List.sort compare_fun in diff --git a/dune b/dune index e2b4842adb5..2a094a073a9 100644 --- a/dune +++ b/dune @@ -13,3 +13,7 @@ (executable (name configure) (libraries dune-configurator findlib cmdliner unix)) + +; Can still be used for dependencies, but dune won't scan these dirs +; for dune files +(data_only_dirs doc scripts python3 .vscode) diff --git a/dune-project b/dune-project index 88080ce624c..94a885046a7 100644 --- a/dune-project +++ b/dune-project @@ -29,6 +29,7 @@ astring mtime ptime + (xapi-log (= :version)) (qcheck-core :with-test) (qcheck-alcotest :with-test) ) diff --git a/ocaml/alerts/expiry_alert.ml b/ocaml/alerts/expiry_alert.ml index 1ea19844ba6..d2667ff35df 100644 --- a/ocaml/alerts/expiry_alert.ml +++ b/ocaml/alerts/expiry_alert.ml @@ -50,7 +50,7 @@ let all_messages rpc session_id = let message_body msg expiry = Printf.sprintf "%s%s" msg - (Date.to_string expiry) + (Date.to_rfc3339 expiry) let expired_message obj = Printf.sprintf "%s has expired." obj @@ -58,7 +58,7 @@ let expiring_message obj = Printf.sprintf "%s is expiring soon." obj let maybe_generate_alert now obj_description alert_conditions expiry = let remaining_days = - days_until_expiry (Date.to_float now) (Date.to_float expiry) + days_until_expiry (Date.to_unix_time now) (Date.to_unix_time expiry) in alert_conditions |> List.sort (fun (a, _) (b, _) -> compare a b) diff --git a/ocaml/doc/dune b/ocaml/doc/dune index ee0f921d032..7c3dbcf4f68 100644 --- a/ocaml/doc/dune +++ b/ocaml/doc/dune @@ -35,3 +35,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/doc/wire-protocol.md b/ocaml/doc/wire-protocol.md index 20e39627cc3..155a27b23e0 100644 --- a/ocaml/doc/wire-protocol.md +++ b/ocaml/doc/wire-protocol.md @@ -371,9 +371,9 @@ should not assume that references generated during one session are valid for any future session. References do not allow objects to be compared for equality. Two references to the same object are not guaranteed to be textually identical. -UUIDs are intended to be permanent names for objects. They are +UUIDs are intended to be permanent identifiers for objects. They are guaranteed to be in the OSF DCE UUID presentation format (as output by `uuidgen`). -Clients may store UUIDs on disk and use them to lookup objects in subsequent sessions +Clients may store UUIDs on disk and use them to look up objects in subsequent sessions with the server. Clients may also test equality on objects by comparing UUID strings. The API provides mechanisms for translating between UUIDs and opaque references. @@ -463,7 +463,7 @@ XML-RPC and JSON-RPC client libraries. 
First, initialise python: ```bash -$ python2.7 +$ python3 >>> ``` diff --git a/ocaml/dune b/ocaml/dune new file mode 100644 index 00000000000..dbdeef2876a --- /dev/null +++ b/ocaml/dune @@ -0,0 +1 @@ +(data_only_dirs xe) diff --git a/ocaml/events/event_listen.py b/ocaml/events/event_listen.py deleted file mode 100755 index 79c0f8c4735..00000000000 --- a/ocaml/events/event_listen.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib, sys - -# Don't forget to include the port in the url (eg http://melton:8086/) -if len(sys.argv) <> 4: - raise "Expected arguments: " - -server = xmlrpclib.Server(sys.argv[1]); -session = server.session.login_with_password(sys.argv[2], sys.argv[3], "1.0", "xen-api-event-listen.py")['Value'] - -server.event.register(session, ["*"]) -while True: - events = server.event.next(session)['Value'] - for event in events: - print event['id'], " ", event['class'], " ", event['operation'], " ",event['ref'], " ", - if "snapshot" in event.keys(): - print "OK" - else: - print "(no snapshot)" diff --git a/ocaml/gencert/dune b/ocaml/gencert/dune index ef7875abd29..66a78ca4a41 100644 --- a/ocaml/gencert/dune +++ b/ocaml/gencert/dune @@ -67,3 +67,5 @@ ) (action (run %{test} --color=always)) ) + +(data_only_dirs test_data) diff --git a/ocaml/gencert/gencert.ml b/ocaml/gencert/gencert.ml index 695002ebb67..0d3284379ff 100644 --- a/ocaml/gencert/gencert.ml +++ b/ocaml/gencert/gencert.ml @@ -76,19 +76,34 @@ let () = let program_name = Sys.argv.(0) in let dbg = Printf.sprintf "%s - %f" program_name (Unix.gettimeofday ()) in (* if necessary use Unix.localtime to debug *) - D.debug "%s" dbg ; - match Sys.argv with - | [|_; path; _; _|] when Sys.file_exists path -> - D.info "file already exists at path (%s) - doing nothing" path ; - exit 0 - | [|_; path; cert_gid; sni|] -> ( + let sni_or_exit sni = match SNI.of_string sni with | Some sni -> - main ~dbg ~path ~cert_gid:(int_of_string cert_gid) ~sni () + sni | None -> D.error "SNI must be default or xapi:pool, but got '%s'" sni ; exit 1 - ) + in + let gid_or_exit gid = + match int_of_string_opt gid with + | Some gid -> + gid + | None -> + D.error "GROUPID must be an integer, but got '%s'" gid ; + exit 1 + in + D.debug "%s" dbg ; + match Sys.argv with + | ([|_; path; _|] | [|_; path; _; _|]) when Sys.file_exists path -> + D.info "file already exists at path (%s) - doing nothing" path ; + exit 0 + | [|_; path; sni|] -> + let sni = sni_or_exit sni in + main ~dbg ~path ~cert_gid:(-1) ~sni () + | [|_; path; cert_gid; sni|] -> + let sni = sni_or_exit sni in + let cert_gid = gid_or_exit cert_gid in + main ~dbg ~path ~cert_gid ~sni () | _ -> - D.error "Usage: %s PATH (default|xapi:pool)" program_name ; + D.error "Usage: %s PATH [GROUPID] (default|xapi:pool)" program_name ; exit 1 diff --git a/ocaml/idl/api_version.ml b/ocaml/idl/api_version.ml new file mode 100644 index 00000000000..297be24bc25 --- /dev/null +++ b/ocaml/idl/api_version.ml @@ -0,0 +1,22 @@ +(* + * Copyright (c) Cloud Software Group, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + *) + +(* This file is only needed for building xapi with local make, now the + api_version_major and api_version_minor are defined in xapi.spec and this + file will be regenerated from api_version.ml.in by configure.ml during koji + build. *) + +let api_version_major = 2L + +let api_version_minor = 21L diff --git a/ocaml/idl/api_version.ml.in b/ocaml/idl/api_version.ml.in new file mode 100644 index 00000000000..984d207c7f6 --- /dev/null +++ b/ocaml/idl/api_version.ml.in @@ -0,0 +1,17 @@ +(* + * Copyright (c) Cloud Software Group, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +let api_version_major = @APIVERMAJ@L + +let api_version_minor = @APIVERMIN@L diff --git a/ocaml/idl/api_version.mli b/ocaml/idl/api_version.mli new file mode 100644 index 00000000000..ed946e02e93 --- /dev/null +++ b/ocaml/idl/api_version.mli @@ -0,0 +1,17 @@ +(* + * Copyright (c) Cloud Software Group, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +val api_version_major : int64 + +val api_version_minor : int64 diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 580ca92ddbb..737ecc53b0f 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -217,9 +217,8 @@ module Session = struct session instance has is_local_superuser set, then the value of \ this field is undefined." ; field ~in_product_since:rel_george ~qualifier:DynamicRO - ~default_value:(Some (VDateTime (Date.of_float 0.))) - ~ty:DateTime "validation_time" - "time when session was last validated" + ~default_value:(Some (VDateTime Date.epoch)) ~ty:DateTime + "validation_time" "time when session was last validated" ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String "auth_user_sid" "the subject identifier of the user that was externally \ @@ -2791,8 +2790,13 @@ module Sr_stat = struct , [ ("healthy", "Storage is fully available") ; ("recovering", "Storage is busy recovering, e.g. rebuilding mirrors.") - ; ("unreachable", "Storage is unreachable") - ; ("unavailable", "Storage is unavailable") + ; ( "unreachable" + , "Storage is unreachable but may be recoverable with admin \ + intervention" + ) + ; ( "unavailable" + , "Storage is unavailable, a host reboot will be required" + ) ] ) @@ -3890,9 +3894,11 @@ module VDI = struct ; { param_type= DateTime ; param_name= "snapshot_time" - ; param_doc= "Storage-specific config" + ; param_doc= + "Storage-specific config. 
When the timezone is missing, UTC is \ + assumed" ; param_release= tampa_release - ; param_default= Some (VDateTime Date.never) + ; param_default= Some (VDateTime Date.epoch) } ; { param_type= Ref _vdi @@ -4084,7 +4090,11 @@ module VDI = struct ~params: [ (Ref _vdi, "self", "The VDI to modify") - ; (DateTime, "value", "The snapshot time of this VDI.") + ; ( DateTime + , "value" + , "The snapshot time of this VDI. When the timezone is missing, UTC \ + is assumed" + ) ] ~flags:[`Session] ~doc:"Sets the snapshot time of this VDI." ~hide_from_docs:true ~allowed_roles:_R_LOCAL_ROOT_ONLY () @@ -4463,7 +4473,7 @@ module VDI = struct ~ty:(Set (Ref _vdi)) ~doc_tags:[Snapshots] "snapshots" "List pointing to all the VDIs snapshots." ; field ~in_product_since:rel_orlando - ~default_value:(Some (VDateTime Date.never)) ~qualifier:DynamicRO + ~default_value:(Some (VDateTime Date.epoch)) ~qualifier:DynamicRO ~ty:DateTime ~doc_tags:[Snapshots] "snapshot_time" "Date/time when this snapshot was created." ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando @@ -4747,7 +4757,7 @@ module VBD_metrics = struct uid _vbd_metrics ; namespace ~name:"io" ~contents:iobandwidth () ; field ~qualifier:DynamicRO ~ty:DateTime - ~default_value:(Some (VDateTime Date.never)) + ~default_value:(Some (VDateTime Date.epoch)) ~lifecycle: [ (Published, rel_rio, "") @@ -5173,6 +5183,10 @@ module VM_guest_metrics = struct ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) "os_version" "version of the OS" + ; field ~qualifier:DynamicRO + ~ty:(Map (String, String)) + ~lifecycle:[] "netbios_name" "The NETBIOS name of the machine" + ~default_value:(Some (VMap [])) ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) "PV_drivers_version" "version of the PV drivers" @@ -5502,7 +5516,11 @@ module VMPP = struct ~params: [ (Ref _vmpp, "self", "The protection policy") - ; (DateTime, "value", "the value to set") + ; ( DateTime + , "value" + , "When the last backup was done. When the timezone is missing, \ + UTC is assumed" + ) ] () @@ -5512,7 +5530,11 @@ module VMPP = struct ~params: [ (Ref _vmpp, "self", "The protection policy") - ; (DateTime, "value", "the value to set") + ; ( DateTime + , "value" + , "When the last archive was done. When the timezone is missing, \ + UTC is assumed" + ) ] () @@ -5660,7 +5682,7 @@ module VMPP = struct "true if this protection policy's backup is running" ; field ~lifecycle:removed ~qualifier:DynamicRO ~ty:DateTime "backup_last_run_time" "time of the last backup" - ~default_value:(Some (VDateTime (Date.of_float 0.))) + ~default_value:(Some (VDateTime Date.epoch)) ; field ~lifecycle:removed ~qualifier:StaticRO ~ty:archive_target_type "archive_target_type" "type of the archive target config" ~default_value:(Some (VEnum "none")) @@ -5684,7 +5706,7 @@ module VMPP = struct "true if this protection policy's archive is running" ; field ~lifecycle:removed ~qualifier:DynamicRO ~ty:DateTime "archive_last_run_time" "time of the last archive" - ~default_value:(Some (VDateTime (Date.of_float 0.))) + ~default_value:(Some (VDateTime Date.epoch)) ; field ~lifecycle:removed ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" "all VMs attached to this protection policy" ; field ~lifecycle:removed ~qualifier:StaticRO ~ty:Bool @@ -5773,7 +5795,11 @@ module VMSS = struct ~params: [ (Ref _vmss, "self", "The snapshot schedule") - ; (DateTime, "value", "the value to set") + ; ( DateTime + , "value" + , "When the schedule was last run. 
When a timezone is missing, \ + UTC is assumed" + ) ] () @@ -5847,7 +5873,7 @@ module VMSS = struct ~default_value:(Some (VMap [])) ; field ~qualifier:DynamicRO ~ty:DateTime "last_run_time" "time of the last snapshot" - ~default_value:(Some (VDateTime (Date.of_float 0.))) + ~default_value:(Some (VDateTime Date.epoch)) ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" "all VMs attached to this snapshot schedule" ] @@ -6302,7 +6328,10 @@ module Message = struct [ (cls, "cls", "The class of object") ; (String, "obj_uuid", "The uuid of the object") - ; (DateTime, "since", "The cutoff time") + ; ( DateTime + , "since" + , "The cutoff time. When the timezone is missing, UTC is assumed" + ) ] ~flags:[`Session] ~result:(Map (Ref _message, Record _message), "The relevant messages") @@ -6310,7 +6339,13 @@ module Message = struct let get_since = call ~name:"get_since" ~in_product_since:rel_orlando - ~params:[(DateTime, "since", "The cutoff time")] + ~params: + [ + ( DateTime + , "since" + , "The cutoff time. When the timezone is missing, UTC is assumed" + ) + ] ~flags:[`Session] ~result:(Map (Ref _message, Record _message), "The relevant messages") ~allowed_roles:_R_READ_ONLY () diff --git a/ocaml/idl/datamodel_certificate.ml b/ocaml/idl/datamodel_certificate.ml index bfbdd2b60b5..53c594fb941 100644 --- a/ocaml/idl/datamodel_certificate.ml +++ b/ocaml/idl/datamodel_certificate.ml @@ -59,10 +59,10 @@ let t = ~default_value:(Some (VRef null_ref)) "The host where the certificate is installed" ; field ~qualifier:StaticRO ~lifecycle ~ty:DateTime "not_before" - ~default_value:(Some (VDateTime Date.never)) + ~default_value:(Some (VDateTime Date.epoch)) "Date after which the certificate is valid" ; field ~qualifier:StaticRO ~lifecycle ~ty:DateTime "not_after" - ~default_value:(Some (VDateTime Date.never)) + ~default_value:(Some (VDateTime Date.epoch)) "Date before which the certificate is valid" ; field ~qualifier:StaticRO ~lifecycle: diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index ec7e2d7fdb2..e66ab3eff93 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -153,9 +153,9 @@ let tech_preview_releases = (* api version *) (* Normally xencenter_min_verstring and xencenter_max_verstring in the xapi_globs should be set to the same value, * but there are exceptions: please consult the XenCenter maintainers if in doubt. *) -let api_version_major = 2L +let api_version_major = Api_version.api_version_major -let api_version_minor = 21L +let api_version_minor = Api_version.api_version_minor let api_version_string = Printf.sprintf "%Ld.%Ld" api_version_major api_version_minor diff --git a/ocaml/idl/datamodel_errors.ml b/ocaml/idl/datamodel_errors.ml index 3071a4add47..aead3e0abc4 100644 --- a/ocaml/idl/datamodel_errors.ml +++ b/ocaml/idl/datamodel_errors.ml @@ -1256,6 +1256,9 @@ let _ = () ; error Api_errors.sr_is_cache_sr ["host"] ~doc:"The SR is currently being used as a local cache SR." () ; + error Api_errors.sr_unhealthy ["sr"; "health"; "fix"] + ~doc:"The SR is currently unhealthy. See the suggestion on how to fix it." + () ; error Api_errors.clustered_sr_degraded ["sr"] ~doc: "An SR is using clustered local storage. 
It is not safe to reboot a host \ diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index b7d34350819..d48470f3a71 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -935,7 +935,9 @@ let create_params = ; { param_type= DateTime ; param_name= "last_software_update" - ; param_doc= "Date and time when the last software update was applied." + ; param_doc= + "Date and time when the last software update was applied. When the \ + timezone is missing, UTC is assumed" ; param_release= dundee_release ; param_default= Some (VDateTime Date.epoch) } @@ -2188,8 +2190,7 @@ let t = "tls_verification_enabled" ~default_value:(Some (VBool false)) "True if this host has TLS verifcation enabled" ; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:DateTime - "last_software_update" - ~default_value:(Some (VDateTime (Date.of_float 0.0))) + "last_software_update" ~default_value:(Some (VDateTime Date.epoch)) "Date and time when the last software update was applied" ; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:Bool ~default_value:(Some (VBool false)) "https_only" diff --git a/ocaml/idl/datamodel_lifecycle.ml b/ocaml/idl/datamodel_lifecycle.ml index 1a101ead83b..bcd67b50acb 100644 --- a/ocaml/idl/datamodel_lifecycle.ml +++ b/ocaml/idl/datamodel_lifecycle.ml @@ -26,7 +26,7 @@ let prototyped_of_field = function | "Observer", "uuid" -> Some "23.14.0" | "Repository", "origin" -> - Some "24.21.0-next" + Some "24.23.0" | "Repository", "gpgkey_path" -> Some "22.12.0" | "Certificate", "fingerprint_sha1" -> @@ -67,6 +67,8 @@ let prototyped_of_field = function Some "22.27.0" | "host", "last_software_update" -> Some "22.20.0" + | "VM_guest_metrics", "netbios_name" -> + Some "24.28.0" | "VM", "groups" -> Some "24.19.1" | "VM", "pending_guidances_full" -> @@ -126,7 +128,7 @@ let prototyped_of_message = function | "Repository", "set_gpgkey_path" -> Some "22.12.0" | "Repository", "introduce_bundle" -> - Some "24.21.0-next" + Some "24.23.0" | "PCI", "get_dom0_access_status" -> Some "24.14.0" | "PCI", "enable_dom0_access" -> diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index 61893c99e01..67a6fdd4ea1 100644 --- a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -26,12 +26,12 @@ *) module Date = struct - open Xapi_stdext_date + module Date = Xapi_stdext_date.Date include Date - let iso8601_of_rpc rpc = Date.of_string (Rpc.string_of_rpc rpc) + let t_of_rpc rpc = Date.of_iso8601 (Rpc.string_of_rpc rpc) - let rpc_of_iso8601 date = Rpc.rpc_of_string (Date.to_string date) + let rpc_of_t date = Rpc.rpc_of_string (Date.to_rfc3339 date) end (* useful constants for product vsn tracking *) @@ -418,7 +418,7 @@ type api_value = | VInt of int64 | VFloat of float | VBool of bool - | VDateTime of Date.iso8601 + | VDateTime of Date.t | VEnum of string | VMap of (api_value * api_value) list | VSet of api_value list diff --git a/ocaml/idl/datamodel_types.mli b/ocaml/idl/datamodel_types.mli index 76ac814eb49..fbfb9e4a6f6 100644 --- a/ocaml/idl/datamodel_types.mli +++ b/ocaml/idl/datamodel_types.mli @@ -1,9 +1,9 @@ module Date : sig include module type of Xapi_stdext_date.Date - val iso8601_of_rpc : Rpc.t -> Xapi_stdext_date.Date.iso8601 + val t_of_rpc : Rpc.t -> Xapi_stdext_date.Date.t - val rpc_of_iso8601 : Xapi_stdext_date.Date.iso8601 -> Rpc.t + val rpc_of_t : Xapi_stdext_date.Date.t -> Rpc.t end val oss_since_303 : string option @@ -115,7 +115,7 @@ type api_value = | VInt of int64 | VFloat of float | VBool of bool - | VDateTime of Date.iso8601 + | VDateTime of 
Date.t | VEnum of string | VMap of (api_value * api_value) list | VSet of api_value list diff --git a/ocaml/idl/datamodel_values.ml b/ocaml/idl/datamodel_values.ml index 1b463d4b2e7..e270899b50f 100644 --- a/ocaml/idl/datamodel_values.ml +++ b/ocaml/idl/datamodel_values.ml @@ -40,7 +40,7 @@ let rec to_rpc v = | VBool b -> Rpc.Bool b | VDateTime d -> - Rpc.String (Date.to_string d) + Rpc.String (Date.to_rfc3339 d) | VEnum e -> Rpc.String e | VMap vvl -> @@ -94,7 +94,7 @@ let to_db v = | VBool false -> String "false" | VDateTime d -> - String (Date.to_string d) + String (Date.to_rfc3339 d) | VEnum e -> String e | VMap vvl -> @@ -117,7 +117,7 @@ let gen_empty_db_val t = | Bool -> Value.String "false" | DateTime -> - Value.String (Date.to_string Date.never) + Value.String Date.(to_rfc3339 epoch) | Enum (_, (enum_value, _) :: _) -> Value.String enum_value | Enum (_, []) -> diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index bf6fe168f8a..af7aa27b270 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -339,7 +339,11 @@ let update_snapshot_metadata = [ (Ref _vm, "vm", "The VM to update") ; (Ref _vm, "snapshot_of", "") - ; (DateTime, "snapshot_time", "") + ; ( DateTime + , "snapshot_time" + , "The time when the snapshot was taken. When a timezone is missing, \ + UTC is assumed" + ) ; (String, "transportable_snapshot_id", "") ] ~allowed_roles:_R_POOL_OP () @@ -1628,6 +1632,43 @@ let operations = ] ) +let set_blocked_operations = + call ~name:"set_blocked_operations" + ~in_product_since:rel_orlando (* but updated 2024 *) + ~doc: + "Update list of operations which have been explicitly blocked and an \ + error code" + ~params: + [ + (Ref _vm, "self", "The VM") + ; (Map (operations, String), "value", "Blocked operations") + ] + ~allowed_roles:_R_VM_ADMIN () + +let add_to_blocked_operations = + call ~name:"add_to_blocked_operations" + ~in_product_since:rel_orlando (* but updated 2024 *) + ~doc: + "Update list of operations which have been explicitly blocked and an \ + error code" + ~params: + [ + (Ref _vm, "self", "The VM") + ; (operations, "key", "Blocked operation") + ; (String, "value", "Error code") + ] + ~allowed_roles:_R_VM_ADMIN () + +let remove_from_blocked_operations = + call ~name:"remove_from_blocked_operations" + ~in_product_since:rel_orlando (* but updated 2024 *) + ~doc: + "Update list of operations which have been explicitly blocked and an \ + error code" + ~params: + [(Ref _vm, "self", "The VM"); (operations, "key", "Blocked operation")] + ~allowed_roles:_R_VM_ADMIN () + let assert_operation_valid = call ~in_oss_since:None ~in_product_since:rel_rio ~name:"assert_operation_valid" @@ -1909,6 +1950,9 @@ let t = ; restart_device_models ; set_uefi_mode ; get_secureboot_readiness + ; set_blocked_operations + ; add_to_blocked_operations + ; remove_from_blocked_operations ] ~contents: ([uid _vm] @@ -2072,7 +2116,7 @@ let t = "List pointing to all the VM snapshots." ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando - ~default_value:(Some (VDateTime Date.never)) ~ty:DateTime + ~default_value:(Some (VDateTime Date.epoch)) ~ty:DateTime "snapshot_time" "Date/time when this snapshot was created."
; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VString "")) @@ -2086,7 +2130,7 @@ let t = ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) - ~qualifier:RW + ~qualifier:StaticRO ~ty:(Map (operations, String)) "blocked_operations" "List of operations which have been explicitly blocked and an \ diff --git a/ocaml/idl/dune b/ocaml/idl/dune index 430938311f8..d971e6597df 100644 --- a/ocaml/idl/dune +++ b/ocaml/idl/dune @@ -6,7 +6,7 @@ datamodel_pool datamodel_cluster datamodel_cluster_host dm_api escaping datamodel_values datamodel_schema datamodel_certificate datamodel_diagnostics datamodel_repository datamodel_lifecycle - datamodel_vtpm datamodel_observer datamodel_vm_group) + datamodel_vtpm datamodel_observer datamodel_vm_group api_version) (libraries rpclib.core sexplib0 @@ -45,11 +45,12 @@ (action (run %{x} -closed -markdown)) ) -(test - (name schematest) +(tests + (names schematest test_datetimes) (modes exe) - (modules schematest) + (modules schematest test_datetimes) (libraries + astring rpclib.core rpclib.json xapi_datamodel @@ -81,3 +82,5 @@ (alias update-dm-lifecycle) (action (diff datamodel_lifecycle.ml datamodel_lifecycle.ml.generated))) + +(data_only_dirs templates) diff --git a/ocaml/idl/json_backend/gen_json.ml b/ocaml/idl/json_backend/gen_json.ml index 446eeb04b8f..5c8fc0da0ff 100644 --- a/ocaml/idl/json_backend/gen_json.ml +++ b/ocaml/idl/json_backend/gen_json.ml @@ -89,7 +89,7 @@ end = struct | VBool x -> string_of_bool x | VDateTime x -> - Date.to_string x + Date.to_rfc3339 x | VEnum x -> x | VMap x -> diff --git a/ocaml/idl/ocaml_backend/dune b/ocaml/idl/ocaml_backend/dune index f6c4173d363..70cc34c90a1 100644 --- a/ocaml/idl/ocaml_backend/dune +++ b/ocaml/idl/ocaml_backend/dune @@ -11,3 +11,4 @@ ) ) +(data_only_dirs python) diff --git a/ocaml/idl/ocaml_backend/gen_api.ml b/ocaml/idl/ocaml_backend/gen_api.ml index 564121ab819..5b18d603f4e 100644 --- a/ocaml/idl/ocaml_backend/gen_api.ml +++ b/ocaml/idl/ocaml_backend/gen_api.ml @@ -354,6 +354,38 @@ let toposort_types highapi types = assert (List.sort compare result = List.sort compare types) ; result +let gen_record_deserialization highapi = + let gen_of_to_string types = + let gen_string_and_all = function + | DT.Set (DT.Enum (_, elist) as e) -> + let nlist = List.map fst elist in + [ + (Printf.sprintf "let %s_of_string str = %s") + (OU.alias_of_ty e) + (OU.ocaml_of_string_of_enum nlist) + ; (Printf.sprintf "let %s_to_string = %s") + (OU.alias_of_ty e) + (OU.ocaml_to_string_of_enum nlist) + ] + | _ -> + [] + in + List.concat_map gen_string_and_all types + in + let all_types = all_types_of highapi in + let all_types = add_set_enums all_types in + List.iter (List.iter print) + (between [""] + [ + [ + "exception Record_failure of string" + ; "let record_failure fmt =" + ; "Printf.ksprintf (fun msg -> raise (Record_failure msg)) fmt" + ] + ; gen_of_to_string all_types + ] + ) + let gen_client_types highapi = let all_types = all_types_of highapi in let all_types = add_set_enums all_types in @@ -381,9 +413,9 @@ let gen_client_types highapi = "module Date = struct" ; " open Xapi_stdext_date" ; " include Date" - ; " let rpc_of_iso8601 x = DateTime (Date.to_string x)" - ; " let iso8601_of_rpc = function String x | DateTime x -> \ - Date.of_string x | _ -> failwith \"Date.iso8601_of_rpc\"" + ; " let rpc_of_t x = DateTime (Date.to_rfc3339 x)" + ; 
" let t_of_rpc = function String x | DateTime x -> Date.of_iso8601 \ + x | _ -> failwith \"Date.t_of_rpc\"" ; "end" ] ; [ diff --git a/ocaml/idl/ocaml_backend/gen_api_main.ml b/ocaml/idl/ocaml_backend/gen_api_main.ml index 4765e498278..41ffde51a8f 100644 --- a/ocaml/idl/ocaml_backend/gen_api_main.ml +++ b/ocaml/idl/ocaml_backend/gen_api_main.ml @@ -73,7 +73,17 @@ let _ = [ ( "-mode" , Arg.Symbol - ( ["client"; "server"; "api"; "db"; "actions"; "sql"; "rbac"; "test"] + ( [ + "client" + ; "server" + ; "api" + ; "utils" + ; "db" + ; "actions" + ; "sql" + ; "rbac" + ; "test" + ] , fun x -> mode := Some x ) , "Choose which file to output" @@ -114,6 +124,8 @@ let _ = Gen_api.gen_client api | Some "api" -> Gen_api.gen_client_types api + | Some "utils" -> + Gen_api.gen_record_deserialization api | Some "server" -> Gen_api.gen_server api | Some "db" -> diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index 23c3dc8a747..44542173fe9 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -69,7 +69,7 @@ let dm_to_string tys : O.Module.t = | DT.Bool -> "string_of_bool" | DT.DateTime -> - "Date.to_string" + "Date.to_rfc3339" | DT.Enum (_name, cs) -> let aux (c, _) = Printf.sprintf {|| %s -> "%s"|} (OU.constructor_of c) c @@ -119,7 +119,7 @@ let string_to_dm tys : O.Module.t = | DT.Bool -> "bool_of_string" | DT.DateTime -> - "fun x -> Date.of_string x" + "fun x -> Date.of_iso8601 x" | DT.Enum (name, cs) -> let aux (c, _) = "\"" ^ c ^ "\" -> " ^ OU.constructor_of c in "fun v -> match v with\n " diff --git a/ocaml/idl/ocaml_backend/gen_test.ml b/ocaml/idl/ocaml_backend/gen_test.ml index d9824961db0..abf251014f0 100644 --- a/ocaml/idl/ocaml_backend/gen_test.ml +++ b/ocaml/idl/ocaml_backend/gen_test.ml @@ -32,7 +32,7 @@ let rec gen_test_type highapi ty = | DT.Bool -> "true" | DT.DateTime -> - "(Date.of_string \"20120101T00:00:00Z\")" + "(Date.of_iso8601 \"20120101T00:00:00Z\")" | DT.Enum (_, (x, _) :: _) -> Printf.sprintf "(%s)" (OU.constructor_of x) | DT.Set (DT.Enum (_, y)) -> diff --git a/ocaml/idl/ocaml_backend/ocaml_utils.ml b/ocaml/idl/ocaml_backend/ocaml_utils.ml index a01ae955586..7fe7fe063bc 100644 --- a/ocaml/idl/ocaml_backend/ocaml_utils.ml +++ b/ocaml/idl/ocaml_backend/ocaml_utils.ml @@ -101,6 +101,19 @@ let ocaml_to_string_of_enum list = let single name = Printf.sprintf {|%s -> "%s"|} (constructor_of name) name in Printf.sprintf "function %s" (ocaml_map_enum_ " | " single list) +(** Create the body of an of_string function for an enum *) +let ocaml_of_string_of_enum list = + let single name = + Printf.sprintf {|"%s" -> %s|} + (String.lowercase_ascii name) + (constructor_of name) + in + let quoted name = Printf.sprintf {|'%s'|} name in + Printf.sprintf + {|match String.lowercase_ascii str with %s | s -> record_failure "Expected one of %s, got %%s" s|} + (ocaml_map_enum_ " | " single list) + (ocaml_map_enum_ ", " quoted list) + (** Convert an IDL type into a string containing OCaml code representing the type. 
*) let rec ocaml_of_ty = function @@ -115,7 +128,7 @@ let rec ocaml_of_ty = function | Bool -> "bool" | DateTime -> - "Date.iso8601" + "Date.t" | Set (Record x) -> alias_of_ty (Record x) ^ " list" | Set x -> diff --git a/ocaml/idl/ocaml_backend/python/list_vms.py b/ocaml/idl/ocaml_backend/python/list_vms.py deleted file mode 100755 index 0d7a75313cb..00000000000 --- a/ocaml/idl/ocaml_backend/python/list_vms.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib -server = xmlrpclib.Server("http://melton:8086"); -session = server.session.login_with_password("root", "xenroot", "1.0", "xen-api-list-vms.py")['Value'] -print session -vms = server.VM.get_all(session)['Value'] -print vms -#for vm in vms: -# print vm,server.VM.get_kernel__kernel(session, vm) diff --git a/ocaml/idl/ocaml_backend/python/pause_vm.py b/ocaml/idl/ocaml_backend/python/pause_vm.py deleted file mode 100755 index 2795496e1cd..00000000000 --- a/ocaml/idl/ocaml_backend/python/pause_vm.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib -server = xmlrpclib.Server("http://localhost:8086"); -session = server.Session.do_login_with_password("user", "passwd", "1.0", "xen-api-pause-vm.py")['Value'] -server.VM.do_pause(session, '7366a41a-e50e-b891-fa0c-ca5b4d2e3f1c') diff --git a/ocaml/idl/ocaml_backend/python/test_client.py b/ocaml/idl/ocaml_backend/python/test_client.py deleted file mode 100755 index 05888c97db7..00000000000 --- a/ocaml/idl/ocaml_backend/python/test_client.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python - -import getopt, sys, xmlrpclib - -url = "http://dhcp108:70000" #default -parsed = getopt.getopt(sys.argv[1:], "u:url") -if len(parsed[0]) == 1: - url = parsed[0][0][1] - -# Create an object to represent our server. -server = xmlrpclib.Server(url); - -# Call the server and get our result. -print "Logging in... ", -session = server.Session.do_login_with_password("user", "passwd", "1.0", "xen-api-test-client.py") -print "OK" -print "Session ID: \""+session+"\"" -vm_list = server.VM.do_list(session) - -print "VM list = " + repr(vm_list) - -for vm in vm_list: - print "VM ", vm, " in state: ", server.VM.get_power_state(session, vm) - -first_vm = vm_list[0] -other = server.VM.get_otherConfig(session, first_vm) -print repr(other) - - -#state = server.VM.get_power_state(session, first_vm) -#if state == "Halted": -# print "Starting first VM... ", -# server.VM.do_start(session, first_vm, 1==0) -#elif state == "Suspended": -# print "Restoring first VM..." -# server.VM.do_unhibernate(session, first_vm, 1==0) -#elif state == "Running": -# print "Suspending first VM... ", -# server.VM.do_hibernate(session, first_vm, 1==1) -#print "OK" - -print "Logging out... 
", -server.Session.do_logout(session) -print "OK" diff --git a/ocaml/idl/ocaml_backend/python/unpause_vm.py b/ocaml/idl/ocaml_backend/python/unpause_vm.py deleted file mode 100755 index 97d748e1dca..00000000000 --- a/ocaml/idl/ocaml_backend/python/unpause_vm.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib -server = xmlrpclib.Server("http://localhost:8086"); -session = server.Session.do_login_with_password("user", "passwd", "1.0", "xen-api-unpause-vm.py")['Value'] -server.VM.do_unpause(session, '7366a41a-e50e-b891-fa0c-ca5b4d2e3f1c') diff --git a/ocaml/idl/schematest.ml b/ocaml/idl/schematest.ml index e81d05ee0ab..0afe0a10be1 100644 --- a/ocaml/idl/schematest.ml +++ b/ocaml/idl/schematest.ml @@ -3,7 +3,7 @@ let hash x = Digest.string x |> Digest.to_hex (* BEWARE: if this changes, check that schema has been bumped accordingly in ocaml/idl/datamodel_common.ml, usually schema_minor_vsn *) -let last_known_schema_hash = "4417b0087b481c3038e73f170b7d4d01" +let last_known_schema_hash = "60590fa3fa2f8af66d9bf3c50b7bacc2" let current_schema_hash : string = let open Datamodel_types in @@ -19,11 +19,10 @@ let () = if last_known_schema_hash <> current_schema_hash then ( Printf.eprintf {| - New schema hash ('%s') doesn't match the last known one. Please bump the -datamodel schema versions if necessary, and update 'last_known_schema_hash'. - +datamodel schema versions if necessary, and update 'last_known_schema_hash' +in file %s. |} - current_schema_hash ; + current_schema_hash __FILE__ ; exit 1 ) diff --git a/ocaml/idl/templates/toc.mustache b/ocaml/idl/templates/toc.mustache index 4b58953b6e7..126bf2922e6 100644 --- a/ocaml/idl/templates/toc.mustache +++ b/ocaml/idl/templates/toc.mustache @@ -8,7 +8,7 @@ - title: Types url: @root@management-api/types.html {{#classes}} - - title: Class:{{{name}}} + - title: "Class: {{{name}}}" url: @root@management-api/class-{{{name_lower}}}.html {{/classes}} - title: Error Handling diff --git a/ocaml/idl/test_datetimes.ml b/ocaml/idl/test_datetimes.ml new file mode 100644 index 00000000000..ab31f313e5d --- /dev/null +++ b/ocaml/idl/test_datetimes.ml @@ -0,0 +1,47 @@ +module DT = Datamodel_types + +let calls_with_datetime_params = + let get_messages DT.{name; messages; _} = + List.to_seq messages + |> Seq.map (fun msg -> + DT.{msg with msg_name= Printf.sprintf "%s.%s" name msg.msg_name} + ) + in + let with_datetimes DT.{msg_name; msg_params; _} = + let cursed_params = + List.filter_map + (fun param -> + if + param.DT.param_type = DT.DateTime + && not (Astring.String.is_infix ~affix:"UTC" param.param_doc) + then + Some (msg_name, param.param_name, param.param_doc) + else + None + ) + msg_params + in + if cursed_params <> [] then Some (List.to_seq cursed_params) else None + in + + Datamodel.all_system + |> List.to_seq + |> Seq.concat_map get_messages + |> Seq.filter_map with_datetimes + |> Seq.concat + +let () = + if not (Seq.is_empty calls_with_datetime_params) then ( + Printf.printf + "\x1b[31;1mERROR\x1b[0m: Found datetime parameters in calls without \ + proper documentation. It must mention that datetimes are assumed to \ + have UTC when they do not contain a timezone. Parameters found:\n" ; + calls_with_datetime_params + |> Seq.iter (fun (call_name, param_name, param_doc) -> + Printf.printf "%s (%s): %s\n" call_name param_name param_doc + ) ; + exit 1 + ) else + Printf.printf + "\x1b[32;1mOK\x1b[0m: All datetime parameters in calls have proper \ + documentation." 
diff --git a/ocaml/idl/test_datetimes.mli b/ocaml/idl/test_datetimes.mli new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ocaml/libs/clock/date.ml b/ocaml/libs/clock/date.ml index a4a43cde623..c668b0c1fb3 100644 --- a/ocaml/libs/clock/date.ml +++ b/ocaml/libs/clock/date.ml @@ -10,6 +10,8 @@ GNU Lesser General Public License for more details. *) +module L = Debug.Make (struct let name = __MODULE__ end) + let months = [| "Jan" @@ -28,22 +30,18 @@ let months = let days = [|"Sun"; "Mon"; "Tue"; "Wed"; "Thu"; "Fri"; "Sat"|] -type print_timezone = Empty | TZ of string - -(* we must store the print_type with iso8601 to handle the case where the local time zone is UTC *) -type t = Ptime.date * Ptime.time * print_timezone +(* iso8601 allows datetimes to not contain any timezone information. + We must record whether a timezone was present, because without one the + timestamp cannot be converted back to a timestamp with UTC as a + reference. When serializing timezoneless timestamps, the timezone must + likewise be omitted. *) +type tz = int option -let utc = TZ "Z" +type t = {t: Ptime.t; tz: tz} -let of_dt print_type dt = - let date, time = dt in - (date, time, print_type) - -let to_dt (date, time, _) = (date, time) +let utc = Some 0 let best_effort_iso8601_to_rfc3339 x = - (* (a) add dashes - * (b) add UTC tz if no tz provided *) let x = try Scanf.sscanf x "%04d%02d%02dT%s" (fun y mon d rest -> @@ -60,30 +58,39 @@ let best_effort_iso8601_to_rfc3339 x = in match tz with | None | Some "" -> - (* the caller didn't specify a tz. we must try to add one so that ptime can at least attempt to parse *) - (Printf.sprintf "%sZ" x, Empty) - | Some tz -> - (x, TZ tz) + (* the caller didn't specify a tz, assume Coordinated Universal Time *) + Printf.sprintf "%sZ" x + | Some _ -> + x let of_iso8601 x = - let rfc3339, print_timezone = best_effort_iso8601_to_rfc3339 x in + let rfc3339 = best_effort_iso8601_to_rfc3339 x in match Ptime.of_rfc3339 rfc3339 |> Ptime.rfc3339_error_to_msg with | Error _ -> invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x) - | Ok (t, tz, _) -> ( - match tz with - | None | Some 0 -> - Ptime.to_date_time t |> of_dt print_timezone - | Some _ -> - invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x) - ) - -let to_rfc3339 ((y, mon, d), ((h, min, s), _), print_type) = - match print_type with - | TZ tz -> - Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i%s" y mon d h min s tz - | Empty -> - Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i" y mon d h min s + | Ok (t, tz, _) -> + {t; tz} + +let print_tz tz_s = + match tz_s with + | None -> + "" + | Some 0 -> + "Z" + | Some tz -> + let tz_sign = if tz < 0 then '-' else '+' in + let all_tz_minutes = tz / 60 |> Int.abs in + let tz_h = all_tz_minutes / 60 in + let tz_min = all_tz_minutes mod 60 in + Printf.sprintf "%c%02d:%02d" tz_sign tz_h tz_min + +let to_rfc3339 {t; tz} = + (* Must be compatible with iso8601 as well. 
Because of limitations in some clients, + the hyphens between year, month and day have to be absent + *) + let (y, mon, d), ((h, min, s), _) = Ptime.to_date_time ?tz_offset_s:tz t in + let tz = print_tz tz in + Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i%s" y mon d h min s tz (* Extracted from tondering.dk/claus/cal/chrweek.php#calcdow *) let weekday ~year ~mon ~day = @@ -92,29 +99,25 @@ let weekday ~year ~mon ~day = let m = mon + (12 * a) - 2 in (day + y + (y / 4) - (y / 100) + (y / 400) + (31 * m / 12)) mod 7 -let to_rfc822 ((year, mon, day), ((h, min, s), _), print_type) = - let timezone = - match print_type with Empty | TZ "Z" -> "GMT" | TZ tz -> tz +let to_rfc822 {t; tz} = + let (year, mon, day), ((h, min, s), _) = + Ptime.to_date_time ?tz_offset_s:tz t in + let timezone = match print_tz tz with "Z" -> "GMT" | tz -> tz in let weekday = weekday ~year ~mon ~day in Printf.sprintf "%s, %d %s %d %02d:%02d:%02d %s" days.(weekday) day months.(mon - 1) year h min s timezone -let to_ptime_t t = - match to_dt t |> Ptime.of_date_time with - | Some t -> +let to_ptime = function + | {t; tz= None} as d -> + L.warn "%s: Date %s converted to POSIX time, but timezone is missing" + __FUNCTION__ (to_rfc3339 d) ; + t + | {t; tz= Some _} -> t - | None -> - let _, (_, offset), _ = t in - invalid_arg - (Printf.sprintf "%s: dt='%s', offset='%i' is invalid" __FUNCTION__ - (to_rfc3339 t) offset - ) - -let to_ptime = to_ptime_t -let of_ptime t = Ptime.to_date_time t |> of_dt utc +let of_ptime t = {t; tz= utc} let of_unix_time s = match Ptime.of_float_s s with @@ -123,24 +126,22 @@ let of_unix_time s = | Some t -> of_ptime t -let to_unix_time t = to_ptime_t t |> Ptime.to_float_s +let to_unix_time t = to_ptime t |> Ptime.to_float_s -let _localtime current_tz_offset t = - let tz_offset_s = current_tz_offset |> Option.value ~default:0 in - let localtime = t |> Ptime.to_date_time ~tz_offset_s |> of_dt Empty in - let _, (_, localtime_offset), _ = localtime in - if localtime_offset <> tz_offset_s then - invalid_arg - (Printf.sprintf "%s: offsets don't match. 
offset='%i', t='%s'" - __FUNCTION__ tz_offset_s (Ptime.to_rfc3339 t) - ) ; - localtime +let strip_tz tz t = + let t = + match tz with + | None -> + t + | Some tz -> + Ptime.Span.of_int_s tz |> Ptime.add_span t |> Option.value ~default:t + in + {t; tz= None} -let _localtime_string current_tz_offset t = - _localtime current_tz_offset t |> to_rfc3339 +let _localtime_string tz t = strip_tz tz t |> to_rfc3339 let localtime () = - _localtime (Ptime_clock.current_tz_offset_s ()) (Ptime_clock.now ()) + strip_tz (Ptime_clock.current_tz_offset_s ()) (Ptime_clock.now ()) let now () = of_ptime (Ptime_clock.now ()) @@ -152,19 +153,19 @@ let is_later ~than t = Ptime.is_later ~than:(to_ptime than) (to_ptime t) let diff a b = Ptime.diff (to_ptime a) (to_ptime b) -let compare_print_tz a b = +let compare_tz a b = match (a, b) with - | Empty, Empty -> + | None, None -> 0 - | TZ a_s, TZ b_s -> - String.compare a_s b_s - | Empty, TZ _ -> + | Some a_s, Some b_s -> + Int.compare a_s b_s + | None, Some _ -> -1 - | TZ _, Empty -> + | Some _, None -> 1 -let compare ((_, _, a_z) as a) ((_, _, b_z) as b) = +let compare a b = let ( ) a b = if a = 0 then b else a in - Ptime.compare (to_ptime a) (to_ptime b) compare_print_tz a_z b_z + Ptime.compare (to_ptime a) (to_ptime b) compare_tz a.tz b.tz -let eq x y = compare x y = 0 +let equal x y = if x == y then true else compare x y = 0 diff --git a/ocaml/libs/clock/date.mli b/ocaml/libs/clock/date.mli index 2a0123813b3..1ba0f19c9d9 100644 --- a/ocaml/libs/clock/date.mli +++ b/ocaml/libs/clock/date.mli @@ -12,24 +12,32 @@ * GNU Lesser General Public License for more details. *) -(** date-time with support for keeping timezone for ISO 8601 conversion *) +(** Nanosecond-precision POSIX timestamps, allows datetimes with unspecified + timezones. These are needed to produce and accept ISO 8601 datetimes without + timezones, but because the timezone is not known they do not share a common + point of time with any other timestamps they cannot be converted to unix + time, or be compared with other timestamps. All other timestamps have a + timezone attached to them, which will be used to serialize them to a + datetime string. This timezone is determined when creating a value and + cannot be changed. For timestamps created from datetime strings, the + timezone is maintained. For all other values UTC is used. *) type t (** Conversions *) val of_ptime : Ptime.t -> t -(** Convert ptime to time in UTC *) +(** Converts ptime to date *) val to_ptime : t -> Ptime.t (** Convert date/time to a ptime value: the number of seconds since 00:00:00 - UTC, 1 Jan 1970. Assumes the underlying {!t} is in UTC *) + UTC, 1 Jan 1970. When {!t} lacks a timezone, UTC is assumed *) val of_unix_time : float -> t (** Convert calendar time [x] (as returned by e.g. Unix.time), to time in UTC *) val to_unix_time : t -> float (** Convert date/time to a unix timestamp: the number of seconds since - 00:00:00 UTC, 1 Jan 1970. Assumes the underlying {!t} is in UTC *) + 00:00:00 UTC, 1 Jan 1970. When {!t} lacks a timezone, UTC is assumed *) val to_rfc822 : t -> string (** Convert date/time to email-formatted (RFC 822) string. *) @@ -39,38 +47,42 @@ val to_rfc3339 : t -> string the ISO 8601 format *) val of_iso8601 : string -> t -(** Convert ISO 8601 formatted string to a date/time value. Does not accept a - timezone annotated datetime - i.e. string must be UTC, and end with a Z *) +(** Convert ISO 8601 formatted string to a date/time value. 
Timezone can be + missing from the string, but that means some conversions will assume UTC, + which might be incorrect *) val epoch : t -(** 00:00:00 UTC, 1 Jan 1970, in UTC *) +(** 00:00:00 UTC, 1 Jan 1970 *) val now : unit -> t -(** Count the number of seconds passed since 00:00:00 UTC, 1 Jan 1970, in UTC *) +(** Count the number of seconds passed since 00:00:00 UTC, 1 Jan 1970 *) val _localtime_string : Ptime.tz_offset_s option -> Ptime.t -> string (** exposed for testing *) val localtime : unit -> t -(** Count the number of seconds passed since 00:00:00 UTC, 1 Jan 1970, in local - time *) +(** Local date time, the timezone is stripped. Do not use this call in new + code. *) (** Comparisons *) -val eq : t -> t -> bool -(** [eq a b] returns whether [a] and [b] are equal *) +val equal : t -> t -> bool +(** [equal a b] returns whether [a] and [b] are equal. Two timestamps are only + equal when both their instant and their timezone match. *) val compare : t -> t -> int (** [compare a b] returns -1 if [a] is earlier than [b], 1 if [a] is later than - [b] or the ordering of the timezone printer *) + [b]; timestamps denoting the same instant are ordered by their timezone. + When [a] or [b] lack a timezone, UTC is assumed *) val is_earlier : than:t -> t -> bool (** [is_earlier ~than a] returns whether the timestamp [a] happens before - [than] *) + [than]. When [than] or [a] lack a timezone, UTC is assumed. *) val is_later : than:t -> t -> bool -(** [is_later ~than a] returns whether the timestamp [a] happens after [than] - *) +(** [is_later ~than a] returns whether the timestamp [a] happens after [than]. + When [than] or [a] lack a timezone, UTC is assumed. *) val diff : t -> t -> Ptime.Span.t -(** [diff a b] returns the span of time corresponding to [a - b] *) +(** [diff a b] returns the span of time corresponding to [a - b]. When [a] + or [b] lack a timezone, UTC is assumed. *)
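
Taken together, the date.ml and date.mli changes above make an explicit offset survive a parse/print round trip, map a missing timezone to UTC on input, and reserve timezone-less values for localtime (). A small sketch of the observable behaviour, assuming the module is opened as in its unit tests; the expected strings and the unix time constant come from the tests in this patch:

let () =
  (* an explicit offset is kept and printed back *)
  let d = of_iso8601 "20201220T18:10:19+02:00" in
  assert (to_rfc3339 d = "20201220T18:10:19+02:00") ;
  (* no timezone on input: treated as UTC and printed with Z *)
  let naive = of_iso8601 "20201210T17:19:20" in
  assert (to_rfc3339 naive = "20201210T17:19:20Z") ;
  assert (to_unix_time naive = 1607620760.)
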
diff --git a/ocaml/libs/clock/dune b/ocaml/libs/clock/dune index 76285033f35..a2afef36460 100644 --- a/ocaml/libs/clock/dune +++ b/ocaml/libs/clock/dune @@ -8,6 +8,7 @@ mtime.clock.os (re_export ptime) ptime.clock.os + xapi-log ) ) diff --git a/ocaml/libs/clock/test_date.ml b/ocaml/libs/clock/test_date.ml index 78f673f635c..9318f44af54 100644 --- a/ocaml/libs/clock/test_date.ml +++ b/ocaml/libs/clock/test_date.ml @@ -10,6 +10,28 @@ let dash_time_str = "2020-04-07T08:28:32Z" let no_dash_utc_time_str = "20200407T08:28:32Z" +let best_effort_iso8601_to_rfc3339 x = + let x = + try + Scanf.sscanf x "%04d%02d%02dT%s" (fun y mon d rest -> + Printf.sprintf "%04d-%02d-%02dT%s" y mon d rest + ) + with _ -> x + in + let tz = + try + Scanf.sscanf x "%04d-%02d-%02dT%02d:%02d:%02d%s" (fun _ _ _ _ _ _ tz -> + Some tz + ) + with _ -> None + in + match tz with + | None | Some "" -> + (* the caller didn't specify a tz, use the Unqualified Local Time *) + Printf.sprintf "%s-00:00" x + | Some _ -> + x + let tests = let test_of_unix_time_invertible () = let non_int_time = 1586245987.70200706 in @@ -17,20 +39,24 @@ let tests = check_float "to_unix_time inverts of_unix_time" time (time |> of_unix_time |> to_unix_time) ; check_true "of_unix_time inverts to_unix_time" - @@ eq (time |> of_unix_time) + @@ equal (time |> of_unix_time) (time |> of_unix_time |> to_unix_time |> of_unix_time) in - let test_only_utc () = + let test_iso8601 () = let utc = "2020-12-20T18:10:19Z" in let _ = of_iso8601 utc in (* UTC is valid *) let non_utc = "2020-12-20T18:10:19+02:00" in - let exn = - Invalid_argument "Clock__Date.of_iso8601: 2020-12-20T18:10:19+02:00" + let _ = of_iso8601 non_utc in + () + in + let test_roundtrip_conversion () = + let non_utc = ["20201220T18:10:19+02:00"; "20201220T18:10:19-08:45"] in + let test spec = + let result = spec |> of_iso8601 |> to_rfc3339 in + Alcotest.(check string) "Roundtrip conversion should be consistent" spec result in - Alcotest.check_raises "only UTC is accepted" exn (fun () -> - of_iso8601 non_utc |> ignore - ) + List.iter test non_utc in let test_ca333908 () = check_float "dash time and no dash time represent the same unix timestamp" @@ -41,7 +67,7 @@ let tests = check_string "to_rfc3339 inverts of_iso8601" no_dash_utc_time_str (no_dash_utc_time_str |> of_iso8601 |> to_rfc3339) ; check_true "of_iso8601 inverts to_rfc3339" - (eq + (equal (no_dash_utc_time_str |> of_iso8601) (no_dash_utc_time_str |> of_iso8601 |> to_rfc3339 |> of_iso8601) ) @@ -51,6 +77,21 @@ check_string "to_rfc3339 is backwards compatible" no_dash_utc_time_str (dash_time_str |> of_iso8601 |> to_rfc3339) in + let test_localtime () = + let time = localtime () in + match + time + |> to_rfc3339 + |> best_effort_iso8601_to_rfc3339 + |> Ptime.of_rfc3339 + |> Ptime.rfc3339_error_to_msg + with + | Ok (_, tz, _) -> + Alcotest.(check @@ option int) + "localtime generates a timestamp without timezone" None tz + | Error (`Msg msg) -> + Alcotest.failf "Unexpected error: %s" msg + in let test_localtime_string () = let[@warning "-8"] (Ok (t, _, _)) = Ptime.of_rfc3339 "2020-04-07T09:01:28Z" @@ -82,24 +123,33 @@ let tests = (String.contains localtime_string 'Z') in let test_xsi894 () = + let canonical = "20201210T17:19:20Z" in let missing_tz_no_dash = "20201210T17:19:20" in let missing_tz_dash = "2020-12-10T17:19:20" in - check_string "can process missing tz no dash" missing_tz_no_dash + check_string + "Timestamp with neither timezone nor dashes is accepted, gets converted to \ UTC" + canonical + (missing_tz_no_dash |> of_iso8601 |> 
to_rfc3339) ; - check_string "can process missing tz with dashes, but return without dashes" - missing_tz_no_dash - (missing_tz_dash |> of_iso8601 |> to_rfc3339) ; - check_float "to_unix_time assumes UTC" 1607620760. - (missing_tz_no_dash |> of_iso8601 |> to_unix_time) ; - let localtime' = localtime () in - check_string "to_rfc3339 inverts of_iso8601 for localtime" - (localtime' |> to_rfc3339) - (localtime' |> to_rfc3339 |> of_iso8601 |> to_rfc3339) + check_string + "Timestamp without timezone but with dashes is accepted, gets converted to \ UTC" + canonical + (missing_tz_dash |> of_iso8601 |> to_rfc3339) in let test_email_date (unix_timestamp, expected) = let formatted = of_unix_time unix_timestamp |> to_rfc822 in check_string "String is properly RFC-822-formatted" expected formatted in + let test_no_timezone_to_unix () = + (* this is allowed, but it will print a warning to stdout *) + let missing_tz_no_dash = "20201210T17:19:20" in + let with_tz_no_dash = "20201210T17:19:20Z" in + let to_unix_time dt = dt |> of_iso8601 |> to_unix_time in + check_float "Datetime without timezone is assumed to be in UTC" + (to_unix_time with_tz_no_dash) + (to_unix_time missing_tz_no_dash) + in let test_email_dates () = let dates = [ @@ -113,7 +163,8 @@ let tests = in [ ("test_of_unix_time_invertible", `Quick, test_of_unix_time_invertible) - ; ("test_only_utc", `Quick, test_only_utc) + ; ("test_only_utc", `Quick, test_iso8601) + ; ("Roundtrip conversion", `Quick, test_roundtrip_conversion) ; ("test_ca333908", `Quick, test_ca333908) ; ( "test_of_iso8601_invertible_when_no_dashes" , `Quick , test_of_iso8601_invertible_when_no_dashes ) ; ( "test_to_rfc3339_backwards_compatibility" , `Quick , test_to_rfc3339_backwards_compatibility ) + ; ("localtime is printed without timezone", `Quick, test_localtime) ; ("test_localtime_string", `Quick, test_localtime_string) ; ("test_ca342171", `Quick, test_ca342171) - ; ("test_xsi894", `Quick, test_xsi894) + ; ("Parsing datetimes without timezones", `Quick, test_xsi894) + ; ( "Date w/o timezone to POSIX time conversion" , `Quick , test_no_timezone_to_unix ) ; ("RFC 822 formatting", `Quick, test_email_dates) ]
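
The RFC 822 path exercised by the "RFC 822 formatting" test goes through the same print_tz helper, so a UTC timestamp renders its zone as GMT, and the weekday comes from the arithmetic weekday function in date.ml. A one-line sketch of the expected output for the epoch, under the same assumptions as the previous sketch (1 Jan 1970 fell on a Thursday):

let () = assert (of_unix_time 0. |> to_rfc822 = "Thu, 1 Jan 1970 00:00:00 GMT")
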
diff --git a/ocaml/libs/clock/test_timer.ml b/ocaml/libs/clock/test_timer.ml index b94a3c470d2..ecfafa8dcbd 100644 --- a/ocaml/libs/clock/test_timer.ml +++ b/ocaml/libs/clock/test_timer.ml @@ -7,7 +7,7 @@ let spans = let test_timer_remaining = let print = Fmt.to_to_string Mtime.Span.pp in - Test.make ~name:"Timer.remaining" ~print spans @@ fun duration -> + Test.make ~count:20 ~name:"Timer.remaining" ~print spans @@ fun duration -> let timer = Timer.start ~duration in let half = Timer.span_to_s duration /. 2. in let elapsed = Mtime_clock.counter () in diff --git a/ocaml/libs/http-lib/http_client.ml b/ocaml/libs/http-lib/http_client.ml index 163cfdd0dde..8e8c5cd2d44 100644 --- a/ocaml/libs/http-lib/http_client.ml +++ b/ocaml/libs/http-lib/http_client.ml @@ -187,7 +187,9 @@ let response_of_fd ?(use_fastpath = false) fd = with | Unix.Unix_error (_, _, _) as e -> raise e - | _ -> + | e -> + D.debug "%s: returning no response because of the exception: %s" + __FUNCTION__ (Printexc.to_string e) ; None (** See perftest/tests.ml *) diff --git a/ocaml/libs/http-lib/xMLRPC.ml b/ocaml/libs/http-lib/xMLRPC.ml index a918f9b4e13..e9e425b976f 100644 --- a/ocaml/libs/http-lib/xMLRPC.ml +++ b/ocaml/libs/http-lib/xMLRPC.ml @@ -68,7 +68,7 @@ module To = struct let boolean b = value (box "boolean" [pcdata (if b then "1" else "0")]) let datetime s = - value (box "dateTime.iso8601" [pcdata (Xapi_stdext_date.Date.to_string s)]) + value (box "dateTime.iso8601" [pcdata (Xapi_stdext_date.Date.to_rfc3339 s)]) let double x = let txt = @@ -197,7 +197,7 @@ module From = struct let boolean = value (singleton ["boolean"] (( <> ) (Xml.PCData "0"))) let datetime x = - Xapi_stdext_date.Date.of_string + Xapi_stdext_date.Date.of_iso8601 (value (singleton ["dateTime.iso8601"] (pcdata id)) x) let double = value (singleton ["double"] (pcdata float_of_string)) diff --git a/ocaml/libs/http-lib/xMLRPC.mli b/ocaml/libs/http-lib/xMLRPC.mli index c8a7ca32af6..165f2e6ec52 100644 --- a/ocaml/libs/http-lib/xMLRPC.mli +++ b/ocaml/libs/http-lib/xMLRPC.mli @@ -59,7 +59,7 @@ module To : sig val boolean : bool -> xmlrpc (** Marshal a boolean. *) - val datetime : Xapi_stdext_date.Date.iso8601 -> xmlrpc + val datetime : Xapi_stdext_date.Date.t -> xmlrpc (** Marshal a date-time. *) val double : float -> xmlrpc @@ -98,7 +98,7 @@ module From : sig val boolean : xmlrpc -> bool (** Parse a boolean. *) - val datetime : xmlrpc -> Xapi_stdext_date.Date.iso8601 + val datetime : xmlrpc -> Xapi_stdext_date.Date.t (** Parse a date-time. 
*) val double : xmlrpc -> float diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index 22d1e942288..ab097253dcb 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -18,6 +18,14 @@ open D let fail fmt = Printf.ksprintf failwith fmt +let failures = Atomic.make 0 + +let not_throttled () = + let old = Atomic.fetch_and_add failures 1 in + old < 2 + +let reset_throttled () = Atomic.set failures 0 + module W3CBaggage = struct module Key = struct let is_valid_key str = @@ -86,12 +94,6 @@ let validate_attribute (key, value) = && Re.execp attribute_key_regex key && W3CBaggage.Key.is_valid_key key -let observe = Atomic.make false - -let set_observe mode = Atomic.set observe mode - -let get_observe () = Atomic.get observe - module SpanKind = struct type t = Server | Consumer | Client | Producer | Internal [@@deriving rpcty] @@ -133,6 +135,10 @@ end module Attributes = struct include Map.Make (String) + let merge_element map (key, value) = add key value map + + let merge_into into list = List.fold_left merge_element into list + let of_list list = List.to_seq list |> of_seq let to_assoc_list attr = to_seq attr |> List.of_seq @@ -148,18 +154,80 @@ module SpanEvent = struct type t = {name: string; time: float; attributes: string Attributes.t} end +module Trace_id : sig + type t + + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end = struct + type t = int64 * int64 + + let make () = (Random.bits64 (), Random.bits64 ()) + + let of_string s = + try Scanf.sscanf s "%016Lx%016Lx" (fun a b -> (a, b)) + with e -> + D.debug "Failed to parse trace id %s: %s" s (Printexc.to_string e) ; + (* don't cause XAPI to fail *) + (0L, 0L) + + let to_string (a, b) = Printf.sprintf "%016Lx%016Lx" a b + + let compare (a1, a2) (b1, b2) = + match Int64.compare a1 b1 with 0 -> Int64.compare a2 b2 | n -> n +end + +module Span_id : sig + type t + + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end = struct + type t = int64 + + let make = Random.bits64 + + let of_string s = + try Scanf.sscanf s "%Lx" Fun.id + with e -> + D.debug "Failed to parse span id %s: %s" s (Printexc.to_string e) ; + (* don't cause XAPI to fail *) + 0L + + let to_string = Printf.sprintf "%016Lx" + + let compare = Int64.compare +end + module SpanContext = struct - type t = {trace_id: string; span_id: string} [@@deriving rpcty] + type t = {trace_id: Trace_id.t; span_id: Span_id.t} [@@deriving rpcty] let context trace_id span_id = {trace_id; span_id} - let to_traceparent t = Printf.sprintf "00-%s-%s-01" t.trace_id t.span_id + let to_traceparent t = + Printf.sprintf "00-%s-%s-01" + (Trace_id.to_string t.trace_id) + (Span_id.to_string t.span_id) let of_traceparent traceparent = let elements = String.split_on_char '-' traceparent in match elements with | ["00"; trace_id; span_id; _] -> - Some {trace_id; span_id} + Some + { + trace_id= Trace_id.of_string trace_id + ; span_id= Span_id.of_string span_id + } | _ -> None @@ -195,17 +263,15 @@ module Span = struct let get_context t = t.context - let generate_id n = String.init n (fun _ -> "0123456789abcdef".[Random.int 16]) - let start ?(attributes = Attributes.empty) ~name ~parent ~span_kind () = let trace_id = match parent with | None -> - generate_id 32 + Trace_id.make () | Some span_parent -> span_parent.context.trace_id in - let span_id = generate_id 16 in + let span_id = Span_id.make () in let context : SpanContext.t = 
{trace_id; span_id} in (* Using gettimeofday over Mtime as it is better for sharing timestamps between the systems *) let begin_time = Unix.gettimeofday () in @@ -303,16 +369,25 @@ module Span = struct span end -module Spans = struct - let lock = Mutex.create () - - let spans = Hashtbl.create 100 +module TraceMap = Map.Make (Trace_id) +module SpanMap = Map.Make (Span_id) - let span_count () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.length spans +module Spans = struct + let spans = Atomic.make TraceMap.empty + + let rec update_spans f arg = + let old = Atomic.get spans in + let next = f old arg in + if Atomic.compare_and_set spans old next then + () + else ( + (* TODO: should use Kcas.update, or Saturn skip_lists for domains *) + Thread.yield () ; + (update_spans [@tailcall]) f arg ) + let span_count () = TraceMap.cardinal (Atomic.get spans) + let max_spans = Atomic.make 2500 let set_max_spans x = Atomic.set max_spans x @@ -321,140 +396,117 @@ module Spans = struct let set_max_traces x = Atomic.set max_traces x - let finished_spans = Hashtbl.create 100 + let finished_spans = Atomic.make ([], 0) - let span_hashtbl_is_empty () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.length spans = 0 - ) + let span_hashtbl_is_empty () = TraceMap.is_empty (Atomic.get spans) - let finished_span_hashtbl_is_empty () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.length finished_spans = 0 - ) + let finished_span_hashtbl_is_empty () = Atomic.get finished_spans |> snd = 0 - let add_to_spans ~(span : Span.t) = + let add_to_spans_unlocked spans (span : Span.t) = let key = span.context.trace_id in - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt spans key with - | None -> - if Hashtbl.length spans < Atomic.get max_traces then - Hashtbl.add spans key [span] - else - debug "%s exceeded max traces when adding to span table" - __FUNCTION__ - | Some span_list -> - if List.length span_list < Atomic.get max_spans then - Hashtbl.replace spans key (span :: span_list) - else - debug "%s exceeded max traces when adding to span table" - __FUNCTION__ - ) + match TraceMap.find_opt key spans with + | None -> + if TraceMap.cardinal spans < Atomic.get max_traces then + TraceMap.add key (SpanMap.singleton span.context.span_id span) spans + else ( + if not_throttled () then + debug "%s exceeded max traces when adding to span table" + __FUNCTION__ ; + spans + ) + | Some span_list -> + if SpanMap.cardinal span_list < Atomic.get max_spans then + TraceMap.add key + (SpanMap.add span.context.span_id span span_list) + spans + else ( + if not_throttled () then + debug "%s exceeded max traces when adding to span table" + __FUNCTION__ ; + spans + ) - let remove_from_spans span = - let key = span.Span.context.trace_id in - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt spans key with - | None -> - debug "%s span does not exist or already finished" __FUNCTION__ ; - None - | Some span_list -> - ( match - List.filter (fun x -> x.Span.context <> span.context) span_list - with - | [] -> - Hashtbl.remove spans key - | filtered_list -> - Hashtbl.replace spans key filtered_list - ) ; - Some span - ) + let add_to_spans ~span = update_spans add_to_spans_unlocked span - let add_to_finished span = + let remove_from_spans_unlocked spans span = let key = span.Span.context.trace_id in - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt finished_spans key with - | 
None -> - if Hashtbl.length finished_spans < Atomic.get max_traces then - Hashtbl.add finished_spans key [span] - else - debug "%s exceeded max traces when adding to finished span table" - __FUNCTION__ - | Some span_list -> - if List.length span_list < Atomic.get max_spans then - Hashtbl.replace finished_spans key (span :: span_list) - else - debug "%s exceeded max traces when adding to finished span table" - __FUNCTION__ - ) + match TraceMap.find_opt key spans with + | None -> + if not_throttled () then + debug "%s span does not exist or already finished" __FUNCTION__ ; + spans + | Some span_list -> + let span_list = SpanMap.remove span.Span.context.span_id span_list in + if SpanMap.is_empty span_list then + TraceMap.remove key spans + else + TraceMap.add key span_list spans + + let remove_from_spans span = + update_spans remove_from_spans_unlocked span ; + Some span + + let rec add_to_finished span = + let ((spans, n) as old) = Atomic.get finished_spans in + if n < Atomic.get max_spans then + let next = (span :: spans, n + 1) in + if Atomic.compare_and_set finished_spans old next then + () + else ( + Thread.yield () ; + (add_to_finished [@tailcall]) span + ) + else if not_throttled () then + debug "%s exceeded max traces when adding to finished span table" + __FUNCTION__ let mark_finished span = Option.iter add_to_finished (remove_from_spans span) - let span_is_finished x = - match x with - | None -> - false - | Some (span : Span.t) -> - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt finished_spans span.context.trace_id with - | None -> - false - | Some span_list -> - List.mem span span_list - ) + let empty_finished = ([], 0) (** since copies the existing finished spans and then clears the existing spans as to only export them once *) let since () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - let copy = Hashtbl.copy finished_spans in - Hashtbl.clear finished_spans ; - copy - ) + let copy = Atomic.exchange finished_spans empty_finished in + reset_throttled () ; copy - let dump () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.(copy spans, Hashtbl.copy finished_spans) - ) + let dump () = (Atomic.get spans, Atomic.get finished_spans) module GC = struct - let lock = Mutex.create () - let span_timeout = Atomic.make 86400. (* one day in seconds *) let span_timeout_thread = ref None - let gc_inactive_spans () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.filter_map_inplace - (fun _ spanlist -> - let filtered = - List.filter_map - (fun span -> - let elapsed = - Unix.gettimeofday () -. span.Span.begin_time - in - if elapsed > Atomic.get span_timeout *. 1000000. then ( - debug "Tracing: Span %s timed out, forcibly finishing now" - span.Span.context.span_id ; - let span = - Span.finish ~span - ~attributes: - (Attributes.singleton "gc_inactive_span_timeout" - (string_of_float elapsed) - ) - () - in - add_to_finished span ; None - ) else - Some span - ) - spanlist - in - match filtered with [] -> None | spans -> Some spans - ) - spans - ) + let gc_inactive_spans_unlocked spans () = + TraceMap.filter_map + (fun _ spanlist -> + let filtered = + SpanMap.filter_map + (fun _ span -> + let elapsed = Unix.gettimeofday () -. span.Span.begin_time in + if elapsed > Atomic.get span_timeout *. 1000000. 
then ( + if not_throttled () then + debug "Tracing: Span %s timed out, forcibly finishing now" + (Span_id.to_string span.Span.context.span_id) ; + let span = + Span.finish ~span + ~attributes: + (Attributes.singleton "gc_inactive_span_timeout" + (string_of_float elapsed) + ) + () + in + add_to_finished span ; None + ) else + Some span + ) + spanlist + in + if SpanMap.is_empty filtered then None else Some filtered + ) + spans + + let gc_inactive_spans () = update_spans gc_inactive_spans_unlocked () let initialise_thread ~timeout = Atomic.set span_timeout timeout ; @@ -481,6 +533,18 @@ module TracerProvider = struct ; enabled: bool } + let no_op = + { + name_label= "" + ; attributes= Attributes.empty + ; endpoints= [] + ; enabled= false + } + + let current = Atomic.make no_op + + let get_current () = Atomic.get current + let get_name_label t = t.name_label let get_attributes t = Attributes.to_assoc_list t.attributes @@ -510,7 +574,7 @@ module TracerProvider = struct might not be aware that a TracerProvider has already been created.*) error "Tracing : TracerProvider %s already exists" name_label ) ; - if enabled then set_observe true + if enabled then Atomic.set current provider ) let get_tracer_providers_unlocked () = @@ -520,6 +584,16 @@ module TracerProvider = struct Xapi_stdext_threads.Threadext.Mutex.execute lock get_tracer_providers_unlocked + let update_providers_unlocked () = + let providers = get_tracer_providers_unlocked () in + match List.find_opt (fun provider -> provider.enabled) providers with + | None -> + Atomic.set current no_op ; + Atomic.set Spans.spans TraceMap.empty ; + Atomic.set Spans.finished_spans Spans.empty_finished + | Some enabled -> + Atomic.set current enabled + let set ?enabled ?attributes ?endpoints ~uuid () = let update_provider (provider : t) enabled attributes endpoints = let enabled = Option.value ~default:provider.enabled enabled in @@ -544,58 +618,22 @@ module TracerProvider = struct fail "The TracerProvider : %s does not exist" uuid in Hashtbl.replace tracer_providers uuid provider ; - if - List.for_all - (fun provider -> not provider.enabled) - (get_tracer_providers_unlocked ()) - then ( - set_observe false ; - Xapi_stdext_threads.Threadext.Mutex.execute Spans.lock (fun () -> - Hashtbl.clear Spans.spans ; - Hashtbl.clear Spans.finished_spans - ) - ) else - set_observe true + update_providers_unlocked () ) let destroy ~uuid = Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> let _ = Hashtbl.remove tracer_providers uuid in - if Hashtbl.length tracer_providers = 0 then set_observe false else () + update_providers_unlocked () ) end -module Tracer = struct - type t = {_name: string; provider: TracerProvider.t} - - let create ~name ~provider = {_name= name; provider} - - let no_op = - let provider : TracerProvider.t = - { - name_label= "" - ; attributes= Attributes.empty - ; endpoints= [] - ; enabled= false - } - in - {_name= ""; provider} +let get_observe () = TracerProvider.(get_current ()).enabled - let get_tracer ~name = - if Atomic.get observe then ( - let providers = - Xapi_stdext_threads.Threadext.Mutex.execute TracerProvider.lock - TracerProvider.get_tracer_providers_unlocked - in +module Tracer = struct + type t = TracerProvider.t - match List.find_opt TracerProvider.get_enabled providers with - | Some provider -> - create ~name ~provider - | None -> - warn "No provider found for tracing %s" name ; - no_op - ) else - no_op + let get_tracer ~name:_ = TracerProvider.get_current () let span_of_span_context context name : Span.t = { @@ -613,21 
+651,17 @@ module Tracer = struct let start ~tracer:t ?(attributes = []) ?(span_kind = SpanKind.Internal) ~name ~parent () : (Span.t option, exn) result = - (* Do not start span if the TracerProvider is diabled*) - if not t.provider.enabled then + let open TracerProvider in + (* Do not start span if the TracerProvider is disabled *) + if not t.enabled then ok_none else - let attributes = Attributes.of_list attributes in - let attributes = - Attributes.union - (fun _k a _b -> Some a) - attributes t.provider.attributes - in + let attributes = Attributes.merge_into t.attributes attributes in let span = Span.start ~attributes ~name ~parent ~span_kind () in Spans.add_to_spans ~span ; Ok (Some span) let update_span_with_parent span (parent : Span.t option) = - if Atomic.get observe then + if (TracerProvider.get_current ()).enabled then match parent with | None -> Some span @@ -667,8 +701,6 @@ module Tracer = struct span ) - let span_is_finished x = Spans.span_is_finished x - let span_hashtbl_is_empty () = Spans.span_hashtbl_is_empty () let finished_span_hashtbl_is_empty () = @@ -679,8 +711,8 @@ let enable_span_garbage_collector ?(timeout = 86400.) () = Spans.GC.initialise_thread ~timeout let with_tracing ?(attributes = []) ?(parent = None) ~name f = - if Atomic.get observe then ( - let tracer = Tracer.get_tracer ~name in + let tracer = Tracer.get_tracer ~name in + if tracer.enabled then ( match Tracer.start ~tracer ~attributes ~name ~parent () with | Ok span -> ( try diff --git a/ocaml/libs/tracing/tracing.mli b/ocaml/libs/tracing/tracing.mli index 42b700ebb51..e78153c9790 100644 --- a/ocaml/libs/tracing/tracing.mli +++ b/ocaml/libs/tracing/tracing.mli @@ -54,18 +54,40 @@ module SpanEvent : sig type t = {name: string; time: float; attributes: string Attributes.t} end -module SpanContext : sig +module Span_id : sig type t - val context : string -> string -> t + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end + +module Trace_id : sig + type t + + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end + +module SpanContext : sig + type t val to_traceparent : t -> string val of_traceparent : string -> t option - val trace_id_of_span_context : t -> string + val trace_id_of_span_context : t -> Trace_id.t - val span_id_of_span_context : t -> string + val span_id_of_span_context : t -> Span_id.t end module Span : sig @@ -98,6 +120,10 @@ module Span : sig val get_attributes : t -> (string * string) list end +module TraceMap : module type of Map.Make (Trace_id) + +module SpanMap : module type of Map.Make (Span_id) + module Spans : sig val set_max_spans : int -> unit @@ -105,10 +131,9 @@ module Spans : sig val span_count : unit -> int - val since : unit -> (string, Span.t list) Hashtbl.t + val since : unit -> Span.t list * int - val dump : - unit -> (string, Span.t list) Hashtbl.t * (string, Span.t list) Hashtbl.t + val dump : unit -> Span.t SpanMap.t TraceMap.t * (Span.t list * int) end module Tracer : sig @@ -140,8 +165,6 @@ module Tracer : sig val finish : ?error:exn * string -> Span.t option -> (Span.t option, exn) result - val span_is_finished : Span.t option -> bool - val span_hashtbl_is_empty : unit -> bool val finished_span_hashtbl_is_empty : unit -> bool
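
With Trace_id and Span_id now backed by random 64-bit integers (a pair of them for the 128-bit trace id) rather than generated hex strings, hex encoding is confined to the to_string/of_string functions, and W3C traceparent handling becomes a pure round trip through the interface above. A minimal sketch; the hex literal is the example value from the W3C trace-context specification, any well-formed id would do:

let () =
  let tp = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01" in
  match Tracing.SpanContext.of_traceparent tp with
  | Some ctx ->
      (* the ids survive parse/print; the version and flags bytes are fixed *)
      assert (Tracing.SpanContext.to_traceparent ctx = tp)
  | None ->
      assert false
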
diff --git a/ocaml/libs/tracing/tracing_export.ml b/ocaml/libs/tracing/tracing_export.ml index 5bb154d20c2..43761cdde1c 100644 --- a/ocaml/libs/tracing/tracing_export.ml +++ b/ocaml/libs/tracing/tracing_export.ml @@ -83,13 +83,24 @@ module Content = struct ) in { - id= s |> Span.get_context |> SpanContext.span_id_of_span_context - ; traceId= s |> Span.get_context |> SpanContext.trace_id_of_span_context + id= + s + |> Span.get_context + |> SpanContext.span_id_of_span_context + |> Span_id.to_string + ; traceId= + s + |> Span.get_context + |> SpanContext.trace_id_of_span_context + |> Trace_id.to_string ; parentId= s |> Span.get_parent |> Option.map (fun x -> - x |> Span.get_context |> SpanContext.span_id_of_span_context + x + |> Span.get_context + |> SpanContext.span_id_of_span_context + |> Span_id.to_string ) ; name= s |> Span.get_name ; timestamp= int_of_float (Span.get_begin_time s *. 1000000.) @@ -248,9 +259,7 @@ module Destination = struct | Bugtool -> (file_export, "Tracing.File.export") in - let all_spans = - Hashtbl.fold (fun _ spans acc -> spans @ acc) traces [] - in + let all_spans, count = traces in let attributes = [ ("export.span.count", all_spans |> List.length |> string_of_int) @@ -258,9 +267,7 @@ ; ( "xs.tracing.spans_table.count" , Spans.span_count () |> string_of_int ) - ; ( "xs.tracing.finished_spans_table.count" , traces |> Hashtbl.length |> string_of_int - ) + ; ("xs.tracing.finished_spans_table.count", string_of_int count) ] in let@ _ = with_tracing ~parent ~attributes ~name in @@ -273,17 +280,15 @@ debug "Tracing: unable to export span : %s" (Printexc.to_string exn) let flush_spans () = - let span_list = Spans.since () in - let attributes = - [("export.traces.count", Hashtbl.length span_list |> string_of_int)] - in + let ((_span_list, span_count) as span_info) = Spans.since () in + let attributes = [("export.traces.count", string_of_int span_count)] in let@ parent = with_tracing ~parent:None ~attributes ~name:"Tracing.flush_spans" in TracerProvider.get_tracer_providers () |> List.filter TracerProvider.get_enabled |> List.concat_map TracerProvider.get_endpoints - |> List.iter (export_to_endpoint parent span_list) + |> List.iter (export_to_endpoint parent span_info) let delay = Delay.make () diff --git a/ocaml/libs/vhd/vhd_format/f.ml b/ocaml/libs/vhd/vhd_format/f.ml index 6109c8aa713..e3bfc97a1fe 100644 --- a/ocaml/libs/vhd/vhd_format/f.ml +++ b/ocaml/libs/vhd/vhd_format/f.ml @@ -31,11 +31,7 @@ exception Cstruct_differ let cstruct_equal a b = let check_contents a b = try - for i = 0 to Cstruct.length a - 1 do - let a' = Cstruct.get_char a i in - let b' = Cstruct.get_char b i in - if a' <> b' then raise Cstruct_differ - done ; + if Cstruct.compare a b <> 0 then raise Cstruct_differ ; true with _ -> false in diff --git a/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml b/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml index 3073ba88bca..68803676df3 100644 --- a/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml +++ b/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml @@ -99,7 +99,7 @@ let check_written_sectors t expected = | false -> fail (Failure "read empty sector, expected data") | true -> - Alcotest.check cstruct __LOC__ data y ; + Alcotest.check cstruct ~pos:__POS__ "" data y ; return () ) >>= fun () -> loop xs @@ -139,10 +139,10 @@ let check_raw_stream_contents t expected = let actual = Cstruct.sub data (i * 512) 512 in ( if not (List.mem_assoc sector expected) then - Alcotest.check cstruct __LOC__ empty_sector actual + Alcotest.check cstruct ~pos:__POS__ "" empty_sector actual else let expected = List.assoc sector expected in - Alcotest.check cstruct __LOC__ expected actual + Alcotest.check cstruct 
~pos:__POS__ "" expected actual ) ; check (i + 1) in @@ -163,7 +163,7 @@ let check_raw_stream_contents t expected = else let expected = List.assoc offset expected in let actual = Cstruct.sub remaining 0 F.sector_size in - Alcotest.check cstruct __LOC__ expected actual ; + Alcotest.check cstruct ~pos:__POS__ "" expected actual ; loop Int64.(add offset 1L) (Cstruct.shift remaining F.sector_size) in loop offset data diff --git a/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml b/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml index 458c0c7cce6..93ea89365cf 100644 --- a/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml +++ b/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml @@ -51,9 +51,9 @@ let check_empty_disk size = let filename = make_new_filename () in Vhd_IO.create_dynamic ~filename ~size () >>= fun vhd -> Vhd_IO.openchain filename false >>= fun vhd' -> - Alcotest.check Lib.header __LOC__ vhd.Vhd.header vhd'.Vhd.header ; - Alcotest.check Lib.footer __LOC__ vhd.Vhd.footer vhd'.Vhd.footer ; - Alcotest.check Lib.bat __LOC__ vhd.Vhd.bat vhd'.Vhd.bat ; + Alcotest.check Lib.header ~pos:__POS__ "" vhd.Vhd.header vhd'.Vhd.header ; + Alcotest.check Lib.footer ~pos:__POS__ "" vhd.Vhd.footer vhd'.Vhd.footer ; + Alcotest.check Lib.bat ~pos:__POS__ "" vhd.Vhd.bat vhd'.Vhd.bat ; Vhd_IO.close vhd' >>= fun () -> Vhd_IO.close vhd (* Create a disk, resize it, check headers *) @@ -64,7 +64,8 @@ let check_resize size = let vhd = Vhd.resize vhd newsize in Vhd_IO.close vhd >>= fun () -> Vhd_IO.openchain filename false >>= fun vhd' -> - Alcotest.(check int64) __LOC__ newsize vhd.Vhd.footer.Footer.current_size ; + Alcotest.(check int64 ~pos:__POS__) + "" newsize vhd.Vhd.footer.Footer.current_size ; Vhd_IO.close vhd' (* Create a snapshot, check headers *) @@ -74,9 +75,9 @@ let check_empty_snapshot size = let filename = make_new_filename () in Vhd_IO.create_difference ~filename ~parent:vhd () >>= fun vhd' -> Vhd_IO.openchain filename false >>= fun vhd'' -> - Alcotest.check Lib.header __LOC__ vhd'.Vhd.header vhd''.Vhd.header ; - Alcotest.check Lib.footer __LOC__ vhd'.Vhd.footer vhd''.Vhd.footer ; - Alcotest.check Lib.bat __LOC__ vhd'.Vhd.bat vhd''.Vhd.bat ; + Alcotest.check Lib.header ~pos:__POS__ "" vhd'.Vhd.header vhd''.Vhd.header ; + Alcotest.check Lib.footer ~pos:__POS__ "" vhd'.Vhd.footer vhd''.Vhd.footer ; + Alcotest.check Lib.bat ~pos:__POS__ "" vhd'.Vhd.bat vhd''.Vhd.bat ; Vhd_IO.close vhd'' >>= fun () -> Vhd_IO.close vhd' >>= fun () -> Vhd_IO.close vhd diff --git a/ocaml/libs/xapi-rrd/lib_test/dune b/ocaml/libs/xapi-rrd/lib_test/dune index 7a66380a63e..1c62f716d46 100644 --- a/ocaml/libs/xapi-rrd/lib_test/dune +++ b/ocaml/libs/xapi-rrd/lib_test/dune @@ -25,3 +25,5 @@ xapi-rrd ) ) + +(data_only_dirs test_data) diff --git a/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml b/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml index bd8664e9c87..65fb540650b 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml @@ -197,9 +197,6 @@ let test_block () = in if Unix.geteuid () = 0 then run () - else - Alcotest.check_raises "non-root fails to create blockdevice" - (Failure "with_temp_blk") run in test_fd with_make [("read", read_fd); ("write", write_fd); ("lseek", test_lseek)] diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml index 45e9bba5efb..ef0f98ce13a 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml +++ 
b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml @@ -28,6 +28,8 @@ let rfc822_of_float = of_unix_time let rfc822_to_string = to_rfc822 +let eq = equal + type iso8601 = t type rfc822 = t diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli index 62e894808bf..7fb29404306 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli @@ -57,8 +57,8 @@ val localtime : unit -> t (** Comparisons *) -val eq : t -> t -> bool -(** [eq a b] returns whether [a] and [b] are equal *) +val equal : t -> t -> bool +(** [equal a b] returns whether [a] and [b] are equal *) val compare : t -> t -> int (** [compare a b] returns -1 if [a] is earlier than [b], 1 if [a] is later than @@ -77,29 +77,39 @@ val diff : t -> t -> Ptime.Span.t (** Deprecated bindings, these will be removed in a future release: *) +val eq : t -> t -> bool +[@@deprecated "Use Date.equal"] +(** [eq a b] returns whether [a] and [b] are equal *) + val rfc822_to_string : t -> string +[@@deprecated "Use Date.to_rfc822"] (** Same as {!to_rfc822} *) val rfc822_of_float : float -> t +[@@deprecated "Use Date.of_unix_time"] (** Same as {!of_unix_time} *) val of_float : float -> t +[@@deprecated "Use Date.of_unix_time"] (** Same as {!of_unix_time} *) val to_float : t -> float +[@@deprecated "Use Date.to_unix_time"] (** Same as {!to_unix_time} *) val to_string : t -> string +[@@deprecated "Use Date.to_rfc3339"] (** Same as {!to_rfc3339} *) val of_string : string -> t +[@@deprecated "Use Date.of_iso8601"] (** Same as {!of_iso8601} *) -val never : t +val never : t [@@deprecated "Use Date.epoch"] (** Same as {!epoch} *) (** Deprecated alias for {!t} *) -type iso8601 = t +type iso8601 = t [@@deprecated "Use Date.t"] (** Deprecated alias for {!t} *) -type rfc822 = t +type rfc822 = t [@@deprecated "Use Date.t"] diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml index 83bc7f00bd2..be848f7b8a4 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml @@ -115,7 +115,8 @@ let test_time_limited_write = let test_time_limited_read = let gen = Gen.tup2 Generate.t Generate.timeouts and print = Print.tup2 Generate.print Print.float in - Test.make ~name:__FUNCTION__ ~print gen @@ fun (behaviour, timeout) -> + Test.make ~count:20 ~name:__FUNCTION__ ~print gen + @@ fun (behaviour, timeout) -> skip_blk behaviour.kind ; skip_dirlnk behaviour.kind ; skip_blk_timed behaviour ; @@ -166,7 +167,7 @@ let test_time_limited_read = let test_proxy = let gen = Generate.t and print = Generate.print in - Test.make ~name:__FUNCTION__ ~print gen @@ fun behaviour -> + Test.make ~count:20 ~name:__FUNCTION__ ~print gen @@ fun behaviour -> if behaviour.kind <> Unix.S_SOCK then QCheck2.assume_fail () ; let test wrapped_fd = @@ -252,7 +253,7 @@ let check_subsets msg ((s1, s2, s3) as all) ((s1', s2', s3') as all') = let test_select = let gen, print = Generate.select_input in - Test.make ~long_factor:10 ~name:__FUNCTION__ ~print gen @@ fun t -> + Test.make ~name:__FUNCTION__ ~print gen @@ fun t -> (* epoll raised EEXIST, but none of the actual callers in XAPI need this, so skip *) @@ -286,4 +287,5 @@ let tests = let () = (* avoid SIGPIPE *) let (_ : Sys.signal_behavior) = Sys.signal Sys.sigpipe Sys.Signal_ignore in - Xapi_stdext_unix.Unixext.test_open 1024 + (* TODO: reenable 
once the epoll branch is merged Xapi_stdext_unix.Unixext.test_open 1024 *) + () diff --git a/ocaml/license/daily_license_check.ml b/ocaml/license/daily_license_check.ml index b107bfef755..3b6edecbb3e 100644 --- a/ocaml/license/daily_license_check.ml +++ b/ocaml/license/daily_license_check.ml @@ -13,7 +13,7 @@ let get_hosts all_license_params threshold = List.fold_left (fun acc (name_label, license_params) -> let expiry = List.assoc "expiry" license_params in - let expiry = Xapi_stdext_date.Date.(to_float (of_string expiry)) in + let expiry = Xapi_stdext_date.Date.(to_unix_time (of_iso8601 expiry)) in if expiry < threshold then name_label :: acc else @@ -23,7 +23,7 @@ let get_hosts all_license_params threshold = let check_license now pool_license_state all_license_params = let expiry = List.assoc "expiry" pool_license_state in - let expiry = Xapi_stdext_date.Date.(to_float (of_string expiry)) in + let expiry = Xapi_stdext_date.Date.(to_unix_time (of_iso8601 expiry)) in let days = days_to_expiry now expiry in if days <= 0. then Expired (get_hosts all_license_params now) diff --git a/ocaml/message-switch/core_test/concur-rpc-test.sh b/ocaml/message-switch/core_test/concur-rpc-test.sh index a91768972fe..1403946ba5b 100755 --- a/ocaml/message-switch/core_test/concur-rpc-test.sh +++ b/ocaml/message-switch/core_test/concur-rpc-test.sh @@ -9,37 +9,36 @@ trap "cleanup" TERM INT function cleanup { rm -rf "${SWITCHPATH}" } +SECS=${SECS:-0.1} rm -rf "${SWITCHPATH}" && mkdir -p "${SWITCHPATH}" echo Test message switch concurrent processing echo Checking the switch can start late -test -x ./server_unix_main.exe || exit 1 -./server_unix_main.exe -path "$SPATH" & -sleep 1 -test -x ../switch/switch_main.exe && test -x ./client_unix_main.exe || exit 1 -../switch/switch_main.exe --path "$SPATH" --statedir "${SWITCHPATH}" & -./client_unix_main.exe -path "$SPATH" -secs 5 -sleep 2 +./server_unix_main.exe -path "${SPATH}" & +SERVER=$! +sleep "${SECS}" +../switch/switch_main.exe --path "${SPATH}" --statedir "${SWITCHPATH}" & +./client_unix_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" echo Performance test of Lwt to Lwt -test -x lwt/server_main.exe && test -x lwt/client_main.exe || exit 1 -lwt/server_main.exe -path "$SPATH" -concurrent & -lwt/client_main.exe -path "$SPATH" -secs 5 -sleep 2 +lwt/server_main.exe -path "${SPATH}" -concurrent & +SERVER=$! +lwt/client_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" echo Performance test of Async to Lwt -test -x lwt/server_main.exe && test -x async/client_async_main.exe || exit 1 -lwt/server_main.exe -path "$SPATH" -concurrent & -async/client_async_main.exe -path "$SPATH" -secs 5 -sleep 2 +lwt/server_main.exe -path "${SPATH}" -concurrent & +SERVER=$! +async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" echo Performance test of Async to Async -test -x async/server_async_main.exe && test -x async/client_async_main.exe || exit 1 -async/server_async_main.exe -path "$SPATH" -concurrent & -async/client_async_main.exe -path "$SPATH" -secs 5 -sleep 2 +async/server_async_main.exe -path "${SPATH}" -concurrent & +SERVER=$! 
+async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" -../cli/main.exe shutdown --path "$SPATH" -sleep 2 +../cli/main.exe shutdown --path "${SPATH}" diff --git a/ocaml/message-switch/core_test/interop-test.sh b/ocaml/message-switch/core_test/interop-test.sh deleted file mode 100755 index 912d47f2349..00000000000 --- a/ocaml/message-switch/core_test/interop-test.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -set -ex - -LINKPATH="${TMPDIR:-/tmp}/link_test" - -rm -rf ${LINKPATH} && mkdir -p ${LINKPATH} - -lwt/link_test_main.exe -PYTHONPATH=core python message_switch_test.py diff --git a/ocaml/message-switch/core_test/message_switch_test.py b/ocaml/message-switch/core_test/message_switch_test.py deleted file mode 100644 index 5566adf8a08..00000000000 --- a/ocaml/message-switch/core_test/message_switch_test.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2012 Citrix Systems Inc -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import unittest, os -from message_switch import * - -try: - tmpdir = os.environ["TMPDIR"] -except KeyError: - tmpdir = "/tmp" - -basedir = os.path.join(tmpdir, "link_test") - -rpc_req = Message("hello", 1L, "reply_to") -rpc_res = Message("hello", 1L) - -class Internal_invariants(unittest.TestCase): - def test_Message_save_load(self): - for m in [rpc_req, rpc_res]: - n = Message.load(m.save()) - assert m.payload == n.payload - assert m.correlation_id == n.correlation_id - assert m.reply_to == n.reply_to - -def load(x): - path = os.path.join(basedir, x) - f = open(path, "r") - try: - return f.read() - finally: - f.close() - -class Ocaml_interop(unittest.TestCase): - def test_login(self): - py = Login("hello").to_request().to_string() - ocaml = load("login") - assert py == ocaml - def test_create_named(self): - py = Create_request("service").to_request().to_string() - ocaml = load("create") - assert py == ocaml - def test_create_anon(self): - py = Create_request().to_request().to_string() - ocaml = load("create.anon") - assert py == ocaml - def test_subscribe(self): - py = Subscribe("service").to_request().to_string() - ocaml = load("subscribe") - assert py == ocaml - def test_request(self): - py = Send("service", rpc_req).to_request().to_string() - ocaml = load("request") - assert py == ocaml - def test_response(self): - py = Send("service", rpc_res).to_request().to_string() - ocaml = load("reply") - assert py == ocaml - def test_transfer(self): - py = Transfer_request(3, 5.0).to_request().to_string() - ocaml = load("transfer") - assert py == ocaml - def test_ack(self): - py = Ack(3).to_request().to_string() - ocaml = load("ack") - assert py == ocaml - - def test_create_reply(self): - ocaml = Create_response.of_response(Http_response.of_string(load("create.reply"))) - assert ocaml.name == "service" - def test_transfer_reply(self): - ocaml = 
Transfer_response.of_response(Http_response.of_string(load("transfer.reply"))) - m = { - 1L: rpc_req, - 2L: rpc_res, - } - py = Transfer_response(m) - for k in py.messages: - assert k in ocaml.messages - assert str(py.messages[k]) == str(ocaml.messages[k]) - for k in ocaml.messages: - assert k in py.messages - assert str(py.messages[k]) == str(ocaml.messages[k]) - -if __name__ == "__main__": - unittest.main() diff --git a/ocaml/message-switch/dune b/ocaml/message-switch/dune index a0d445776e6..3daaf679370 100644 --- a/ocaml/message-switch/dune +++ b/ocaml/message-switch/dune @@ -1,3 +1,5 @@ (executable (name configure) (libraries dune-configurator findlib)) + +(data_only_dirs www python) diff --git a/ocaml/message-switch/python/message_switch.py b/ocaml/message-switch/python/message_switch.py deleted file mode 100755 index 460d4ee2e04..00000000000 --- a/ocaml/message-switch/python/message_switch.py +++ /dev/null @@ -1,414 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2012 Citrix Systems Inc -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import json - -class Http_request: - def __init__(self, method, uri, body = None): - self.method = method - self.uri = uri - self.body = body - - def to_string(self): - body = "" - if self.body: - body = self.body - lines = [ - "%s %s HTTP/1.1" % (self.method, self.uri), - "Content-Length: %d" % len(body), - "", - body - ] - return "\r\n".join(lines) - -class Http_response: - def __init__(self, body): - self.body = body - - def to_string(self): - lines = [ - "HTTP/1.1 200 OK", - "Content-Length: %d" % len(self.body), - "", - self.body - ] - return "\r\n".join(lines) - - @classmethod - def of_string(cls, txt): - lines = txt.split("\r\n") - if lines[0] <> "HTTP/1.1 200 OK": - raise "Unexpected status line: %s" % lines[0] - rest = "\r\n".join(lines[3:]) - return cls(rest) - -class Message: - def __init__(self, payload, correlation_id, reply_to = None): - self.payload = payload - self.correlation_id = correlation_id - self.reply_to = reply_to - - def save(self): - result = { - "payload": self.payload, - "correlation_id": self.correlation_id - } - if self.reply_to: - result["reply_to"] = self.reply_to - return result - - @classmethod - def load(cls, x): - payload = x["payload"] - correlation_id = x["correlation_id"] - reply_to = None - if "reply_to" in x: - reply_to = x["reply_to"] - return cls(payload, correlation_id, reply_to) - - def __str__(self): - return json.dumps(self.save()) - -class Login: - def __init__(self, some_credential): - self.some_credential = some_credential - - def to_request(self): - return Http_request("GET", "/login/%s" % self.some_credential) - -class Create_request: - def __init__(self, name = None): - self.name = name - - def to_request(self): - uri = "/create" - if self.name: - uri = uri + "/" + self.name - return Http_request("GET", uri) - -class 
Create_response: - def __init__(self, name = None): - self.name = name - - @classmethod - def of_response(cls, response): - return cls(response.body) - - def to_response(self): - return Http_response(self.name) - -class Subscribe: - def __init__(self, name): - self.name = name - - def to_request(self): - return Http_request("GET", "/subscribe/%s" % self.name) - -class Send: - def __init__(self, name, message): - self.name = name - self.message = message - def to_request(self): - if self.message.reply_to: - return Http_request("POST", "/send/%s/%d/%s" % (self.name, self.message.correlation_id, self.message.reply_to), self.message.payload) - else: - return Http_request("POST", "/send/%s/%d" % (self.name, self.message.correlation_id), self.message.payload) - -class Transfer_request: - def __init__(self, ack_to, timeout): - self.ack_to = ack_to - self.timeout = timeout - - def to_request(self): - return Http_request("GET", "/transfer/%Ld/%.16g" % (self.ack_to, self.timeout)) - -class Transfer_response: - def __init__(self, messages): - self.messages = messages - - @classmethod - def of_response(cls, response): - x = json.loads(response.body) - result = {} - for (k, v) in x["messages"]: - result[long(k)] = Message.load(v) - return Transfer_response(result) - -class Ack: - def __init__(self, ack): - self.ack = ack - - def to_request(self): - return Http_request("GET", "/ack/%Ld" % self.ack) - -import string, socket - -default_config = { - "ip": "169.254.0.1", # HIMN IP of dom0 - "port": 8080, # default for xenswitch -} - -class End_of_file(Exception): - def __init__(self): - pass -class Bad_status(Exception): - def __init__(self, status): - self.status = status -class Missing_content_length(Exception): - def __init__(self): - pass -class StreamReader: - def __init__(self, sock): - self.sock = sock - self.buffered = "" - def read_fragment(self, n): - if len(self.buffered) > 0: - num_available = min(n, len(self.buffered)) - fragment = self.buffered[0:num_available] - self.buffered = self.buffered[num_available:] - return fragment - else: - self.buffered = self.sock.recv(16384) - if len(self.buffered) == 0: - raise End_of_file() - return self.read_fragment(n) - def read(self, n): - results = "" - while n > 0: - fragment = self.read_fragment(n) - n = n - len(fragment) - results = results + fragment - return results - - def readline(self): - results = "" - eol = False - while not eol: - byte = self.read(1) - if byte == "\n": - eol = True - else: - results = results + byte - return results - -def link_send(sock, m): - sock.sendall(m.to_request().to_string()) - -def link_recv(reader): - status = reader.readline() - if not(status.startswith("HTTP/1.1 200 OK")): - raise Bad_status(status) - content_length = None - eoh = False - while not eoh: - header = reader.readline().strip() - if header == "": - eoh = True - else: - bits = header.split(":") - key = string.lower(bits[0]) - if key == "content-length": - content_length = int(bits[1]) - if content_length == None: - raise Missing_content_length() - body = reader.read(content_length) - return Http_response(body) - -def login(sock, reader, some_credential): - link_send(sock, Login(some_credential)) - link_recv(reader) - -def create(sock, reader, name = None): - link_send(sock, Create_request(name)) - return Create_response.of_response(link_recv(reader)).name - -def subscribe(sock, reader, name): - link_send(sock, Subscribe(name)) - link_recv(reader) - -def send(sock, reader, name, msg): - link_send(sock, Send(name, msg)) - link_recv(reader) - -def 
transfer(sock, reader, ack_to, timeout): - link_send(sock, Transfer_request(ack_to, timeout)) - return Transfer_response.of_response(link_recv(reader)).messages - -def ack(sock, reader, id): - link_send(sock, Ack(id)) - link_recv(reader) - -from threading import Thread, Event, Lock - -class Receiver(Thread): - def __init__(self, sock, reader, server): - Thread.__init__(self) - self.daemon = True - self.sock = sock - self.reader = reader - self.server = server - self.events = {} - self.replies = {} - def register_correlation_id(self, correlation_id): - event = Event() - self.events[correlation_id] = event - return event - def get_reply(self, correlation_id): - reply = self.replies[correlation_id] - del self.replies[correlation_id] - return reply - def set_listen_callback(self, listen_callback): - self.listen_callback = listen_callback - def run(self): - ack_to = -1L - timeout = 5.0 - while True: - messages = transfer(self.sock, self.reader, ack_to, timeout) - for id in messages.keys(): - ack_to = max(ack_to, id) - m = messages[id] - reply_to = m.reply_to - if reply_to: - reply = self.server.dispatch(m) - send(self.sock, self.reader, reply_to, reply) - ack(self.sock, self.reader, id) - else: - if m.correlation_id not in self.events: - print >>sys.stderr, "Unknown correlation_id: %d" % m.correlation_id - else: - self.replies[m.correlation_id] = m.payload - event = self.events[m.correlation_id] - del self.events[m.correlation_id] - event.set() - -class Connection: - def __init__(self, client, name): - self.client = client - self.name = name - def rpc(self, request): - return self.client.rpc(self.name, request) - -class Server: - def __init__(self): - pass - def dispatch(self, request): - # echo the request back - request.reply_to = None - return request - -class Switch: - def __init__(self, some_credential, config = default_config, server = Server()): - self.some_credential = some_credential - self.config = config - self.server = server - - # Open a connection for requests and one for events - self.request_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.request_sock.connect((config["ip"], config["port"])) - self.request_stream_reader = StreamReader(self.request_sock) - self.request_mutex = Lock() - login(self.request_sock, self.request_stream_reader, some_credential) - - self.event_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.event_sock.connect((config["ip"], config["port"])) - self.event_stream_reader = StreamReader(self.event_sock) - login(self.event_sock, self.event_stream_reader, some_credential) - - self.receiver_thread = Receiver(self.event_sock, self.event_stream_reader, self.server) - self.receiver_thread.start() - self.next_correlation_id = 0 - self.next_correlation_id_mutex = Lock() - - def correlation_id(self): - self.next_correlation_id_mutex.acquire() - try: - correlation_id = self.next_correlation_id - self.next_correlation_id = self.next_correlation_id + 1 - return correlation_id - finally: - self.next_correlation_id_mutex.release() - - def rpc(self, name, request): - correlation_id = self.correlation_id() - event = self.receiver_thread.register_correlation_id(correlation_id) - - self.request_mutex.acquire() - try: - reply_queue = create(self.request_sock, self.request_stream_reader) - subscribe(self.request_sock, self.request_stream_reader, reply_queue) - send(self.request_sock, self.request_stream_reader, name, Message(request, correlation_id, reply_queue)) - finally: - self.request_mutex.release() - - event.wait() - return 
self.receiver_thread.get_reply(correlation_id) - - def connect(self, service): - self.request_mutex.acquire() - try: - create(self.request_sock, self.request_stream_reader, service) - finally: - self.request_mutex.release() - - return Connection(self, service) - - def listen(self, service): - self.request_mutex.acquire() - try: - create(self.request_sock, self.request_stream_reader, service) - subscribe(self.request_sock, self.request_stream_reader, service) - finally: - self.request_mutex.release() - - -if __name__ == "__main__": - from optparse import OptionParser - import sys, time - - parser = OptionParser() - parser.add_option("-x", "--switch", dest="switch", type="string", - help="address of message switch", metavar="SWITCH") - parser.add_option("-l", "--listen", dest="listen", action="store_true", - help="listen for RPCs, instead of sending them") - parser.add_option("-s", "--service", dest="service", type="string", - help="name of the remote service") - parser.add_option("-c", "--client", dest="client_name", type="string", - help="name which identifies this client") - - (options, args) = parser.parse_args() - config = default_config - if options.switch: - bits = options.switch.split(":") - config["ip"] = bits[0] - if len(bits) == 2: - config["port"] = int(bits[1]) - - client_name = "test_python" - if options.client_name: - client_name = options.client_name - if not options.service: - print >> sys.stderr, "Must provide a --service name" - sys.exit(1) - - if options.listen: - s = Switch(client_name, server = Server()) - s.listen(options.service) - while True: - time.sleep(5) - else: - s = Switch(client_name) - c = s.connect(options.service) - print c.rpc("hello") diff --git a/ocaml/networkd/test/dune b/ocaml/networkd/test/dune index 951eda074a0..9d7ac2c9248 100644 --- a/ocaml/networkd/test/dune +++ b/ocaml/networkd/test/dune @@ -25,3 +25,5 @@ ) ) ) + +(data_only_dirs jsonrpc_files) diff --git a/ocaml/quicktest/dune b/ocaml/quicktest/dune index 9a8a4a75043..b061ff1176c 100644 --- a/ocaml/quicktest/dune +++ b/ocaml/quicktest/dune @@ -40,7 +40,7 @@ xenctrl xml-light2 ) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv)) + (preprocess (per_module ((pps ppx_deriving_rpc) Quicktest_vm_lifecycle))) ) diff --git a/ocaml/quicktest/qt.ml b/ocaml/quicktest/qt.ml index 7485cef15d4..3b6b7cd9743 100644 --- a/ocaml/quicktest/qt.ml +++ b/ocaml/quicktest/qt.ml @@ -100,9 +100,9 @@ module Time = struct let now () = Unix.gettimeofday () - let of_field = Xapi_stdext_date.Date.to_float + let of_field = Xapi_stdext_date.Date.to_unix_time - let pp t = Xapi_stdext_date.Date.of_float t |> Xapi_stdext_date.Date.to_string + let pp t = Xapi_stdext_date.Date.(of_unix_time t |> to_rfc3339) let check t ~after ~before = Alcotest.(check bool) @@ -269,7 +269,7 @@ module VDI = struct ; ( `Same , "snapshot_time" , fun vdi -> - vdi.API.vDI_snapshot_time |> Xapi_stdext_date.Date.to_string + vdi.API.vDI_snapshot_time |> Xapi_stdext_date.Date.to_rfc3339 ) ; (`Same, "virtual_size", fun vdi -> vdi.API.vDI_location) ] diff --git a/ocaml/quicktest/qt.mli b/ocaml/quicktest/qt.mli index 15dbb785f28..5ba4e8c68a0 100644 --- a/ocaml/quicktest/qt.mli +++ b/ocaml/quicktest/qt.mli @@ -34,7 +34,7 @@ module Time : sig val now : unit -> t - val of_field : Xapi_stdext_date.Date.iso8601 -> t + val of_field : Xapi_stdext_date.Date.t -> t val pp : t -> string diff --git a/ocaml/quicktest/quicktest b/ocaml/quicktest/quicktest index 89fa7927fef..0388c907ef0 100644 --- a/ocaml/quicktest/quicktest +++ b/ocaml/quicktest/quicktest @@ -1,5 +1,13 @@ 
#!/bin/bash ulimit -n 2048 + +# By default make the tests run 10x as many iterations as the default they +# would've run in the CI +# XenRT can further override this env var if desired +# For this to have an effect tests must NOT specify a long_factor of their own. +QCHECK_LONG_FACTOR=${QCHECK_LONG_FACTOR:=10} +export QCHECK_LONG_FACTOR +echo "QCHECK_LONG_FACTOR: ${QCHECK_LONG_FACTOR}" # Run quicktest with support for exception backtraces. OCAMLRUNPARAM=b "@OPTDIR@/debug/quicktestbin" "$@" diff --git a/ocaml/quicktest/quicktest_date.ml b/ocaml/quicktest/quicktest_date.ml index 7a7e6b7ba5e..19ff9153088 100644 --- a/ocaml/quicktest/quicktest_date.ml +++ b/ocaml/quicktest/quicktest_date.ml @@ -3,9 +3,7 @@ module Date = Xapi_stdext_date.Date let test_host_get_server_localtime rpc session_id () = let host = Client.Host.get_by_uuid ~rpc ~session_id ~uuid:Qt.localhost_uuid in - let (_ : Date.iso8601) = - Client.Host.get_server_localtime ~rpc ~session_id ~host - in + let (_ : Date.t) = Client.Host.get_server_localtime ~rpc ~session_id ~host in () let test_message_get_since rpc session_id () = @@ -14,7 +12,7 @@ let test_message_get_since rpc session_id () = Forkhelpers.execute_command_get_output "/bin/date" [Printf.sprintf "+%s" format'; "-d"; "yesterday"] in - let yesterday = String.trim stdout |> Date.of_string in + let yesterday = String.trim stdout |> Date.of_iso8601 in let (_ : ('a API.Ref.t * API.message_t) list) = Client.Message.get_since ~rpc ~session_id ~since:yesterday in @@ -25,6 +23,7 @@ let test_message_get_since rpc session_id () = ; "%Y-%m-%dT%H:%M:%S" ; "%Y%m%dT%H:%M:%SZ" ; "%Y%m%dT%H:%M:%S" + ; "%Y%m%dT%H:%M:%S-12:00" ] |> List.iter test_with_format diff --git a/ocaml/quicktest/quicktest_event.ml b/ocaml/quicktest/quicktest_event.ml index f844db3e72c..a99f71bf752 100644 --- a/ocaml/quicktest/quicktest_event.ml +++ b/ocaml/quicktest/quicktest_event.ml @@ -381,7 +381,7 @@ let event_message_test rpc session_id () = ) ; let messages = Client.Client.Message.get ~rpc ~session_id ~cls ~obj_uuid - ~since:Xapi_stdext_date.Date.never + ~since:Xapi_stdext_date.Date.epoch in let has_msg m = List.exists (fun (r, _) -> r = m) messages in Alcotest.(check bool) diff --git a/ocaml/quicktest/quicktest_vdi.ml b/ocaml/quicktest/quicktest_vdi.ml index a648495eced..a11e1b2ce05 100644 --- a/ocaml/quicktest/quicktest_vdi.ml +++ b/ocaml/quicktest/quicktest_vdi.ml @@ -139,7 +139,7 @@ let vdi_bad_introduce rpc session_id sr_info () = ~location:(Ref.string_of (Ref.make ())) ~xenstore_data:[] ~sm_config:[] ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null - ~is_a_snapshot:false ~snapshot_time:Xapi_stdext_date.Date.never + ~is_a_snapshot:false ~snapshot_time:Xapi_stdext_date.Date.epoch ~snapshot_of:Ref.null in Alcotest.fail @@ -161,7 +161,7 @@ let vdi_bad_introduce rpc session_id sr_info () = ~other_config:[] ~location:vdir.API.vDI_location ~xenstore_data:[] ~sm_config:[] ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null - ~is_a_snapshot:false ~snapshot_time:Xapi_stdext_date.Date.never + ~is_a_snapshot:false ~snapshot_time:Xapi_stdext_date.Date.epoch ~snapshot_of:Ref.null in Alcotest.fail diff --git a/ocaml/rrd2csv/dune b/ocaml/rrd2csv/dune new file mode 100644 index 00000000000..97dab88ae44 --- /dev/null +++ b/ocaml/rrd2csv/dune @@ -0,0 +1 @@ +(data_only_dirs man) diff --git a/ocaml/sdk-gen/README.md b/ocaml/sdk-gen/README.md index fa45a1c3803..1cb1f2a7238 100644 --- a/ocaml/sdk-gen/README.md +++ b/ocaml/sdk-gen/README.md @@ -9,7 +9,7 @@ 
XenAPI's datamodel. The generation code is written in OCaml and is contained in this directory. The Python module is not auto-generated, it can be found at -[XenAPI.py](../../scripts/examples/python/XenAPI/XenAPI.py). +[XenAPI.py](../../python3/examples/XenAPI/XenAPI.py). To compile the generated source code, follow the instructions in the corresponding `README` files. diff --git a/ocaml/sdk-gen/c/autogen/dune b/ocaml/sdk-gen/c/autogen/dune index e7809a95ba5..78b81f38e4c 100644 --- a/ocaml/sdk-gen/c/autogen/dune +++ b/ocaml/sdk-gen/c/autogen/dune @@ -23,3 +23,4 @@ ) ) +(data_only_dirs src include) diff --git a/ocaml/sdk-gen/c/autogen/src/xen_common.c b/ocaml/sdk-gen/c/autogen/src/xen_common.c index 43b039db6f3..0dbf576aefd 100644 --- a/ocaml/sdk-gen/c/autogen/src/xen_common.c +++ b/ocaml/sdk-gen/c/autogen/src/xen_common.c @@ -950,7 +950,26 @@ static void parse_into(xen_session *s, xmlNode *value_node, { struct tm tm; memset(&tm, 0, sizeof(tm)); - strptime((char *)string, "%Y%m%dT%H:%M:%S", &tm); + // We only support basic ISO8601 since the C SDK only + // connects to the XML-RPC backend + char *formats[] = { + // no dashes, no colons + "%Y%m%dT%H%M%S", + // no dashes, with colons + "%Y%m%dT%H:%M:%S", + // dashes and colons + "%Y-%m-%dT%H:%M:%S", + }; + int num_formats = sizeof(formats) / sizeof(formats[0]); + + for (int i = 0; i < num_formats; i++) + { + if (strptime((char *)string, formats[i], &tm) != NULL) + { + break; + } + } + ((time_t *)value)[slot] = (time_t)mktime(&tm); free(string); } diff --git a/ocaml/sdk-gen/c/dune b/ocaml/sdk-gen/c/dune index 79cb32b80c6..ca7f44dee18 100644 --- a/ocaml/sdk-gen/c/dune +++ b/ocaml/sdk-gen/c/dune @@ -19,3 +19,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/sdk-gen/c/templates/Makefile.mustache b/ocaml/sdk-gen/c/templates/Makefile.mustache index ac78e5ca1e6..939b54ad565 100644 --- a/ocaml/sdk-gen/c/templates/Makefile.mustache +++ b/ocaml/sdk-gen/c/templates/Makefile.mustache @@ -40,11 +40,9 @@ endif CFLAGS = -g -Iinclude \ $(shell xml2-config --cflags) \ - $(shell curl-config --cflags) \ -W -Wall -Wmissing-prototypes -Werror -std=c99 $(POS_FLAG) LDFLAGS = -g $(shell xml2-config --libs) \ - $(shell curl-config --libs) \ -Wl,-rpath,$(shell pwd) $(CYGWIN_LIBXML) # -h for Solaris diff --git a/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx b/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx index 737b0a20d65..7562889272e 100644 --- a/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx +++ b/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx @@ -354,16 +354,6 @@ Your current role is not authorized to perform this action. Action: {0} - - - Your current role is not authorized to perform this action. -Current Role: {0} -Authorized Roles: {1} - - - Your current role is not authorized to perform this action on {2}. -Current Role: {0} -Authorized Roles: {1} Cannot restore on this server because it was saved on an incompatible version diff --git a/ocaml/sdk-gen/csharp/autogen/dune b/ocaml/sdk-gen/csharp/autogen/dune index d5e542936ad..61e1f86a0a4 100644 --- a/ocaml/sdk-gen/csharp/autogen/dune +++ b/ocaml/sdk-gen/csharp/autogen/dune @@ -12,4 +12,6 @@ LICENSE (source_tree .) 
) -) \ No newline at end of file +) + +(data_only_dirs src) diff --git a/ocaml/sdk-gen/csharp/autogen/src/Converters.cs b/ocaml/sdk-gen/csharp/autogen/src/Converters.cs index 2c4e4ba0df7..32b02d987a6 100644 --- a/ocaml/sdk-gen/csharp/autogen/src/Converters.cs +++ b/ocaml/sdk-gen/csharp/autogen/src/Converters.cs @@ -385,16 +385,54 @@ public override object ReadJson(JsonReader reader, Type objectType, object exist internal class XenDateTimeConverter : IsoDateTimeConverter { - private static readonly string[] DateFormatsUniversal = - { - "yyyyMMddTHH:mm:ssZ", "yyyy-MM-ddThh:mm:ssZ" + string [] DateFormatsUtc = { + // dashes and colons + "yyyy-MM-ddTHH:mm:ssZ", + "yyyy-MM-ddTHH:mm:ss.fffZ", + + // no dashes, with colons + "yyyyMMddTHH:mm:ssZ", + "yyyyMMddTHH:mm:ss.fffZ", + + // no dashes + "yyyyMMddTHHmmssZ", + "yyyyMMddTHHmmss.fffZ", }; - private static readonly string[] DateFormatsOther = + string[] DateFormatsLocal = { - "yyyyMMddTHH:mm:ss", + // no dashes + "yyyyMMddTHHmmss.fffzzzz", + "yyyyMMddTHHmmss.fffzzz", + "yyyyMMddTHHmmss.fffzz", + "yyyyMMddTHHmmss.fff", + + "yyyyMMddTHHmmsszzzz", "yyyyMMddTHHmmsszzz", - "yyyyMMddTHHmmsszz" + "yyyyMMddTHHmmsszz", + "yyyyMMddTHHmmss", + + // no dashes, with colons + "yyyyMMddTHH:mm:ss.fffzzzz", + "yyyyMMddTHH:mm:ss.fffzzz", + "yyyyMMddTHH:mm:ss.fffzz", + "yyyyMMddTHH:mm:ss.fff", + + "yyyyMMddTHH:mm:sszzzz", + "yyyyMMddTHH:mm:sszzz", + "yyyyMMddTHH:mm:sszz", + "yyyyMMddTHH:mm:ss", + + // dashes and colons + "yyyy-MM-ddTHH:mm:ss.fffzzzz", + "yyyy-MM-ddTHH:mm:ss.fffzzz", + "yyyy-MM-ddTHH:mm:ss.fffzz", + "yyyy-MM-ddTHH:mm:ss.fff", + + "yyyy-MM-ddTHH:mm:sszzzz", + "yyyy-MM-ddTHH:mm:sszzz", + "yyyy-MM-ddTHH:mm:sszz", + "yyyy-MM-ddTHH:mm:ss", }; public override object ReadJson(JsonReader reader, Type objectType, object existingValue, JsonSerializer serializer) @@ -403,11 +441,11 @@ public override object ReadJson(JsonReader reader, Type objectType, object exist DateTime result; - if (DateTime.TryParseExact(str, DateFormatsUniversal, CultureInfo.InvariantCulture, + if (DateTime.TryParseExact(str, DateFormatsUtc, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal | DateTimeStyles.AdjustToUniversal, out result)) return result; - if (DateTime.TryParseExact(str, DateFormatsOther, CultureInfo.InvariantCulture, + if (DateTime.TryParseExact(str, DateFormatsLocal, CultureInfo.InvariantCulture, DateTimeStyles.None, out result)) return result; @@ -420,7 +458,7 @@ public override void WriteJson(JsonWriter writer, object value, JsonSerializer s { var dateTime = (DateTime)value; dateTime = dateTime.ToUniversalTime(); - var text = dateTime.ToString(DateFormatsUniversal[0], CultureInfo.InvariantCulture); + var text = dateTime.ToString(DateFormatsUtc[0], CultureInfo.InvariantCulture); writer.WriteValue(text); return; } diff --git a/ocaml/sdk-gen/csharp/autogen/src/Failure.cs b/ocaml/sdk-gen/csharp/autogen/src/Failure.cs index b877dfa6de1..923c5488d4e 100644 --- a/ocaml/sdk-gen/csharp/autogen/src/Failure.cs +++ b/ocaml/sdk-gen/csharp/autogen/src/Failure.cs @@ -1,18 +1,18 @@ /* * Copyright (c) Cloud Software Group, Inc. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1) Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
- * + * * 2) Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS @@ -141,7 +141,6 @@ where trimmed.Length > 0 //call these before setting the shortError because they modify the errorText ParseSmapiV3Failures(); - ParseCslgFailures(); try { @@ -181,57 +180,6 @@ private void ParseSmapiV3Failures() } } - /// - /// The ErrorDescription[2] of Cslg failures contains embedded xml. - /// This method parses it and copies the user friendly part to errorText. - /// - private void ParseCslgFailures() - { - /* ErrorDescription[2] example: - - - Host ivory has not yet been added to the service. [err=Object was not found] - - 6 - - Host ivory has not yet been added to the service. [err=Object was not found] - 2 - CXSSHostUtil::getHost - 113 - .\\xss_util_host.cpp - - - */ - - if (ErrorDescription.Count > 2 && ErrorDescription[2] != null && ErrorDescription[0] != null && ErrorDescription[0].StartsWith("SR_BACKEND_FAILURE")) - { - Match m = Regex.Match(ErrorDescription[2], @".*", RegexOptions.Singleline); - - if (m.Success) - { - XmlDocument doc = new XmlDocument(); - - try - { - doc.LoadXml(m.Value); - } - catch (XmlException) - { - return; - } - - XmlNodeList nodes = doc.SelectNodes("/StorageLinkServiceError/Fault"); - - if (nodes != null && nodes.Count > 0 && !string.IsNullOrEmpty(nodes[0].InnerText)) - { - errorText = string.IsNullOrEmpty(errorText) - ? nodes[0].InnerText - : string.Format("{0} ({1})", errorText, nodes[0].InnerText); - } - } - } - } - public override string ToString() { return Message; diff --git a/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs b/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs index 71c9ea81f4c..519cc430d4e 100644 --- a/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs +++ b/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs @@ -304,15 +304,17 @@ protected virtual void PerformPostRequest(Stream postStream, Stream responseStre { webResponse = (HttpWebResponse)webRequest.GetResponse(); - ResponseHeaders = new Dictionary(); + var newResponseHeaders = new Dictionary(); if (webResponse.Headers != null) { var keys = webResponse.Headers.AllKeys; foreach (var key in keys) - ResponseHeaders.Add(key, string.Join(",", webResponse.Headers.Get(key))); + newResponseHeaders.Add(key, string.Join(",", webResponse.Headers.Get(key))); } + ResponseHeaders = newResponseHeaders; + if (webResponse.StatusCode != HttpStatusCode.OK) throw new WebException(webResponse.StatusCode.ToString()); diff --git a/ocaml/sdk-gen/csharp/dune b/ocaml/sdk-gen/csharp/dune index e7112b1aae9..df6856bfc22 100644 --- a/ocaml/sdk-gen/csharp/dune +++ b/ocaml/sdk-gen/csharp/dune @@ -46,3 +46,4 @@ (action (run %{x} -s %{y})) ) +(data_only_dirs templates) diff --git a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml index ff390468130..edaa3a7c7f9 100644 --- a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml +++ b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml @@ -1206,7 +1206,7 @@ and get_default_value_opt field = Printf.sprintf "DateTime.ParseExact(\"%s\", \"yyyyMMddTHH:mm:ssZ\", \ CultureInfo.InvariantCulture)" - (Date.to_string y) + (Date.to_rfc3339 y) ] | VEnum y -> [enum_of_wire y] diff --git a/ocaml/sdk-gen/dune b/ocaml/sdk-gen/dune new 
file mode 100644 index 00000000000..49140147129 --- /dev/null +++ b/ocaml/sdk-gen/dune @@ -0,0 +1 @@ +(data_only_dirs component-test) diff --git a/ocaml/sdk-gen/go/autogen/dune b/ocaml/sdk-gen/go/autogen/dune index c1cb1ddd3b8..98bbd45a418 100644 --- a/ocaml/sdk-gen/go/autogen/dune +++ b/ocaml/sdk-gen/go/autogen/dune @@ -22,3 +22,5 @@ (source_tree .) ) ) + +(data_only_dirs src) diff --git a/ocaml/sdk-gen/go/dune b/ocaml/sdk-gen/go/dune index 6d99103516a..de55ec5cee8 100644 --- a/ocaml/sdk-gen/go/dune +++ b/ocaml/sdk-gen/go/dune @@ -44,3 +44,5 @@ (source_tree templates) ) ) + +(data_only_dirs test_data templates) diff --git a/ocaml/sdk-gen/go/templates/ConvertTime.mustache b/ocaml/sdk-gen/go/templates/ConvertTime.mustache index d1f18643057..d6f0e2a63d5 100644 --- a/ocaml/sdk-gen/go/templates/ConvertTime.mustache +++ b/ocaml/sdk-gen/go/templates/ConvertTime.mustache @@ -1,5 +1,32 @@ {{#serialize}} -var timeFormats = []string{time.RFC3339, "20060102T15:04:05Z", "20060102T15:04:05"} +var timeFormats = []string{ + time.RFC3339, + "2006-01-02T15:04:05", + + // no dashes, no colons + "20060102T15:04:05Z", + "20060102T15:04:05", + "20060102T150405.999999999Z0700", + "20060102T150405", + "20060102T150405Z07", + "20060102T150405Z07:00", + + // no dashes, with colons + "20060102T15:04:05Z07", + "20060102T15:04:05Z0700", + "20060102T15:04:05Z07:00", + "20060102T15:04:05.999999999Z07", + "20060102T15:04:05.999999999Z07:00", + "20060102T15:04:05.999999999Z07", + + // dashes and colon patterns not covered by `time.RFC3339` + "2006-01-02T15:04:05Z07", + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05.999999999Z07", + "2006-01-02T15:04:05.999999999Z07:00", + "2006-01-02T15:04:05.999999999Z07", +} //nolint:unparam func serialize{{func_name_suffix}}(context string, value {{type}}) (string, error) { diff --git a/ocaml/sdk-gen/go/test_data/time_convert.go b/ocaml/sdk-gen/go/test_data/time_convert.go index 7bbdf602ced..d9d5483d5b3 100644 --- a/ocaml/sdk-gen/go/test_data/time_convert.go +++ b/ocaml/sdk-gen/go/test_data/time_convert.go @@ -1,4 +1,31 @@ -var timeFormats = []string{time.RFC3339, "20060102T15:04:05Z", "20060102T15:04:05"} +var timeFormats = []string{ + time.RFC3339, + "2006-01-02T15:04:05", + + // no dashes, no colons + "20060102T15:04:05Z", + "20060102T15:04:05", + "20060102T150405.999999999Z0700", + "20060102T150405", + "20060102T150405Z07", + "20060102T150405Z07:00", + + // no dashes, with colons + "20060102T15:04:05Z07", + "20060102T15:04:05Z0700", + "20060102T15:04:05Z07:00", + "20060102T15:04:05.999999999Z07", + "20060102T15:04:05.999999999Z07:00", + "20060102T15:04:05.999999999Z07", + + // dashes and colon patterns not covered by `time.RFC3339` + "2006-01-02T15:04:05Z07", + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05.999999999Z07", + "2006-01-02T15:04:05.999999999Z07:00", + "2006-01-02T15:04:05.999999999Z07", +} //nolint:unparam func serializeTime(context string, value time.Time) (string, error) { diff --git a/ocaml/sdk-gen/java/autogen/dune b/ocaml/sdk-gen/java/autogen/dune index ba31f05eaaf..0d4efe16d03 100644 --- a/ocaml/sdk-gen/java/autogen/dune +++ b/ocaml/sdk-gen/java/autogen/dune @@ -5,3 +5,4 @@ ) ) +(data_only_dirs xen-api) diff --git a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java index a0e9bff1a3d..3ba135e0a40 100644 --- 
a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java +++ b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java @@ -37,21 +37,97 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; +import java.util.TimeZone; /** - * {@link CustomDateDeserializer} is a Jackson JSON deserializer for parsing {@link Date} objects + * {@link CustomDateDeserializer} is a Jackson JSON deserializer for parsing + * {@link Date} objects * from custom date formats used in Xen-API responses. */ public class CustomDateDeserializer extends StdDeserializer { /** - * Array of {@link SimpleDateFormat} objects representing the custom date formats - * used in XenServer API responses. + * Array of {@link SimpleDateFormat} objects representing the date formats + * used in xen-api responses. + * + * RFC-3339 dates can be returned either in Zulu form or in a time-zone-agnostic + * form. This list is not an exhaustive list of the formats supported by RFC-3339, + * but rather a set of formats that will enable the deserialization of xen-api dates. + * Formats are listed in order of decreasing precision. When adding + * to this list, please ensure the order is kept. */ - private final SimpleDateFormat[] dateFormatters - = new SimpleDateFormat[]{ + private static final SimpleDateFormat[] dateFormatsUtc = { + // Most commonly returned formats + new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"), + new SimpleDateFormat("ss.SSS"), + + // Other + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"), new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss'Z'"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSS'Z'"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSS'Z'"), + + }; + + /** + * Array of {@link SimpleDateFormat} objects representing the date formats for + * local time. + * These formats are used to parse dates in local time zones. + * Formats are listed in order of decreasing precision. When adding + * to this list, please ensure the order is kept.
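+     *
+     * For example, an illustrative timestamp such as "20240101T120000+0100"
+     * (a made-up value, not taken from a real response) carries an RFC-822
+     * zone offset and would be matched by one of the "yyyyMMdd'T'HHmmss"
+     * patterns with a trailing zone designator below.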
+ */ + private static final SimpleDateFormat[] dateFormatsLocal = { + // no dashes, no colons + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSXXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSS"), + + new SimpleDateFormat("yyyyMMdd'T'HHmmssZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssXXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss"), + + // no dashes, with colons + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSXXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSS"), + + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssXXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss"), + + // dashes and colons + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS"), + + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZZZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"), }; /** @@ -62,28 +138,42 @@ public CustomDateDeserializer() { } /** - * Constructs a {@link CustomDateDeserializer} instance with the specified value type. + * Constructs a {@link CustomDateDeserializer} instance with the specified value + * type. * * @param t The value type to handle (can be null, handled by superclass) */ public CustomDateDeserializer(Class t) { super(t); + var utcTimeZone = TimeZone.getTimeZone("UTC"); + for (var utcFormatter : dateFormatsUtc) { + utcFormatter.setTimeZone(utcTimeZone); + } } /** * Deserializes a {@link Date} object from the given JSON parser. 
* - * @param jsonParser The JSON parser containing the date value to deserialize + * @param jsonParser The JSON parser containing the date value to + * deserialize * @param deserializationContext The deserialization context * @return The deserialized {@link Date} object * @throws IOException if an I/O error occurs during deserialization */ @Override public Date deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException { + var text = jsonParser.getText(); + for (SimpleDateFormat formatter : dateFormatsUtc) { + try { + return formatter.parse(text); + } catch (ParseException e) { + // ignore + } + } - for (SimpleDateFormat formatter : dateFormatters) { + for (SimpleDateFormat formatter : dateFormatsLocal) { try { - return formatter.parse(jsonParser.getText()); + return formatter.parse(text); } catch (ParseException e) { // ignore } diff --git a/ocaml/sdk-gen/java/dune b/ocaml/sdk-gen/java/dune index 498b3a7bc09..a1daac834b0 100644 --- a/ocaml/sdk-gen/java/dune +++ b/ocaml/sdk-gen/java/dune @@ -29,3 +29,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index 483d8689db1..58254d3517b 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -33,10 +33,6 @@ let api = (*Here we extract a list of objs (look in datamodel_types.ml for the structure definitions)*) let classes = objects_of_api api -let print_license file = - output_string file Licence.bsd_two_clause ; - output_string file "\n\n" - (*How shall we translate datamodel identifiers into Java, with its conventions about case, and reserved words?*) let reserved_words = function @@ -99,6 +95,15 @@ let camel_case s = in keyword_map result +let rec set_is_last params acc = + match params with + | [] -> + [] + | `O last :: [] -> + `O (("is_last", `Bool true) :: last) :: acc + | `O h :: tail -> + `O (("is_last", `Bool false) :: h) :: set_is_last tail acc + let exception_class_case x = String.concat "" (List.map @@ -113,18 +118,58 @@ let enums = Hashtbl.create 10 let records = Hashtbl.create 10 -(*We want an empty mutable set to keep the types in.*) +(** Module Ty: Representation an empty mutable set to keep the types in. *) module Ty = struct type t = DT.ty - let compare = compare + (** [stringify_type ty] converts a type [ty] into its string representation. + This aids in comparisons for the [compare] function. For generating string types + please use [get_java_type] instead. + @param ty The type to convert into a string representation. + @return A string representing the type [ty]. *) + let rec stringify_type ty = + match ty with + | SecretString | String -> + "String" + | Int -> + "Long" + | Float -> + "Double" + | Bool -> + "Boolean" + | DateTime -> + "Date" + | Enum (name, _) -> + sprintf "Types.%s" name + | Set t1 -> + sprintf "Set<%s>" (stringify_type t1) + | Map (t1, t2) -> + sprintf "Map<%s, %s>" (stringify_type t1) (stringify_type t2) + | Ref x -> + x + | Record x -> + sprintf "%s.Record" x + | Option x -> + stringify_type x + + (** [compare a1 a2] compares two types [a1] and [a2] based on their string representations. + It first converts the types into strings using [stringify_type], then compares the strings. + @param a1 The first type to compare. + @param a2 The second type to compare. + @return An integer representing the result of the comparison: + - 0 if [a1] is equal to [a2]. + - a negative integer if [a1] is less than [a2]. + - a positive integer if [a1] is greater than [a2]. 
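+      For example (an illustrative call, not taken from the source),
+      [compare (Set Int) (Set Int)] returns 0, because both arguments
+      stringify to "Set<Long>".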
*) + let compare a1 a2 = String.compare (stringify_type a1) (stringify_type a2) end module TypeSet = Set.Make (Ty) let types = ref TypeSet.empty -(* Helper functions for types *) +(***************************************) +(* Helpers for generating Types.java *) +(***************************************) let rec get_java_type ty = types := TypeSet.add ty !types ; match ty with @@ -152,12 +197,9 @@ let rec get_java_type ty = | Option x -> get_java_type x -(*We'd like the list of XenAPI objects to appear as an enumeration so we can*) -(* switch on them, so add it using this mechanism*) let switch_enum = Enum ("XenAPIObjects", List.map (fun x -> (x.name, x.description)) classes) -(*Helper function for get_marshall_function*) let rec get_marshall_function_rec = function | SecretString | String -> "String" @@ -184,27 +226,14 @@ let rec get_marshall_function_rec = function | Option ty -> get_marshall_function_rec ty -(*get_marshall_function (Set(Map(Float,Bool)));; -> "toSetOfMapOfDoubleBoolean"*) let get_marshall_function ty = "to" ^ get_marshall_function_rec ty -let _ = get_java_type switch_enum - -(* Generate the methods *) - let get_java_type_or_void = function | None -> "void" | Some (ty, _) -> get_java_type ty -(* Here are a lot of functions which ask questions of the messages associated with*) -(* objects, the answers to which are helpful when generating the corresponding java*) -(* functions. For instance is_method_static takes an object's message, and*) -(* determines whether it should be static or not in java, by looking at whether*) -(* it has a self parameter or not.*) - -(*Similar functions for deprecation of methods*) - let get_method_deprecated_release_name message = match message.msg_release.internal_deprecated_since with | Some version -> @@ -212,264 +241,6 @@ let get_method_deprecated_release_name message = | None -> None -let get_method_deprecated_annotation message = - match get_method_deprecated_release_name message with - | Some version -> - {|@Deprecated(since = "|} ^ version ^ {|")|} - | None -> - "" - -let get_method_param {param_type= ty; param_name= name; _} = - let ty = get_java_type ty in - let name = camel_case name in - sprintf "%s %s" ty name - -let get_method_params_for_signature params = - String.concat ", " ("Connection c" :: List.map get_method_param params) - -let get_method_params_for_xml message params = - let f = function - | {param_type= Record _; param_name= name; _} -> - camel_case name ^ "_map" - | {param_name= name; _} -> - camel_case name - in - match params with - | [] -> - if is_method_static message then - [] - else - ["this.ref"] - | _ -> - if is_method_static message then - List.map f params - else - "this.ref" :: List.map f params - -let rec range = function 0 -> [] | i -> range (i - 1) @ [i] - -(* Here is the main method generating function.*) -let gen_method file cls message params async_version = - let return_type = - if - String.lowercase_ascii cls.name = "event" - && String.lowercase_ascii message.msg_name = "from" - then - "EventBatch" - else - get_java_type_or_void message.msg_result - in - let method_static = if is_method_static message then "static " else "" in - let method_name = camel_case message.msg_name in - let paramString = get_method_params_for_signature params in - let default_errors = - [ - ( "BadServerResponse" - , "Thrown if the response from the server contains an invalid status." - ) - ; ("XenAPIException", "if the call failed.") - ; ( "IOException" - , "if an error occurs during a send or receive. 
This includes cases \ - where a payload is invalid JSON." - ) - ] - in - let publishInfo = get_published_info_message message cls in - - fprintf file " /**\n" ; - fprintf file " * %s\n" (escape_xml message.msg_doc) ; - fprintf file " * Minimum allowed role: %s\n" - (get_minimum_allowed_role message) ; - if not (publishInfo = "") then fprintf file " * %s\n" publishInfo ; - let deprecated_info = - match get_method_deprecated_release_name message with - | Some version -> - " * @deprecated since " ^ version ^ "\n" - | None -> - "" - in - fprintf file "%s" deprecated_info ; - fprintf file " *\n" ; - fprintf file " * @param c The connection the call is made on\n" ; - - List.iter - (fun x -> - let paramPublishInfo = get_published_info_param message x in - fprintf file " * @param %s %s%s\n" (camel_case x.param_name) - (if x.param_doc = "" then "No description" else escape_xml x.param_doc) - (if paramPublishInfo = "" then "" else " " ^ paramPublishInfo) - ) - params ; - - ( if async_version then - fprintf file " * @return Task\n" - else - match message.msg_result with - | None -> - () - | Some (_, "") -> - fprintf file " * @return %s\n" - (get_java_type_or_void message.msg_result) - | Some (_, desc) -> - fprintf file " * @return %s\n" desc - ) ; - - List.iter - (fun x -> fprintf file " * @throws %s %s\n" (fst x) (snd x)) - default_errors ; - List.iter - (fun x -> - fprintf file " * @throws Types.%s %s\n" - (exception_class_case x.err_name) - x.err_doc - ) - message.msg_errors ; - - fprintf file " */\n" ; - - let deprecated_string = - match get_method_deprecated_annotation message with - | "" -> - "" - | other -> - " " ^ other ^ "\n" - in - if async_version then - fprintf file "%s public %sTask %sAsync(%s) throws\n" deprecated_string - method_static method_name paramString - else - fprintf file "%s public %s%s %s(%s) throws\n" deprecated_string - method_static return_type method_name paramString ; - - let all_errors = - List.map fst default_errors - @ List.map - (fun x -> "Types." 
^ exception_class_case x.err_name) - message.msg_errors - in - fprintf file " %s {\n" (String.concat ",\n " all_errors) ; - - if async_version then - fprintf file " String methodCall = \"Async.%s.%s\";\n" - message.msg_obj_name message.msg_name - else - fprintf file " String methodCall = \"%s.%s\";\n" message.msg_obj_name - message.msg_name ; - - if message.msg_session then - fprintf file " String sessionReference = c.getSessionReference();\n" - else - () ; - - let record_params = - List.filter - (function {param_type= Record _; _} -> true | _ -> false) - message.msg_params - in - - List.iter - (fun {param_name= s; _} -> - let name = camel_case s in - fprintf file " var %s_map = %s.toMap();\n" name name - ) - record_params ; - - fprintf file " Object[] methodParameters = {" ; - - let methodParamsList = - if message.msg_session then - "sessionReference" :: get_method_params_for_xml message params - else - get_method_params_for_xml message params - in - - output_string file (String.concat ", " methodParamsList) ; - - fprintf file "};\n" ; - - if message.msg_result != None || async_version then - fprintf file " var typeReference = new TypeReference<%s>(){};\n" - (if async_version then "Task" else return_type) ; - - let last_statement = - match message.msg_result with - | None when not async_version -> - " c.dispatch(methodCall, methodParameters);\n" - | _ -> - " return c.dispatch(methodCall, methodParameters, typeReference);\n" - in - fprintf file "%s" last_statement ; - - fprintf file " }\n\n" - -(*Some methods have an almost identical asynchronous counterpart, which returns*) -(* a Task reference rather than its usual return value*) -let gen_method_and_asynchronous_counterpart file cls message = - let generator x = - if message.msg_async then gen_method file cls message x true ; - gen_method file cls message x false - in - match message.msg_params with - | [] -> - generator [] - | _ -> - let paramGroups = gen_param_groups message message.msg_params in - List.iter generator paramGroups - -(* Generate the record *) - -(* The fields of an object are stored in trees in the datamodel, which means that*) -(* the next three functions, which are conceptually for generating the fields*) -(* of each class, and for the corresponding entries in the toString and toMap*) -(* functions are in fact implemented as three sets of three mutual recursions,*) -(* which take the trees apart. 
*) - -let gen_record_field file prefix field cls = - let ty = get_java_type field.ty in - let full_name = String.concat "_" (List.rev (field.field_name :: prefix)) in - let name = camel_case full_name in - let publishInfo = get_published_info_field field cls in - fprintf file " /**\n" ; - fprintf file " * %s\n" (escape_xml field.field_description) ; - if not (publishInfo = "") then fprintf file " * %s\n" publishInfo ; - fprintf file " */\n" ; - fprintf file " @JsonProperty(\"%s\")\n" full_name ; - - if field.lifecycle.state = Lifecycle.Deprecated_s then - fprintf file " @Deprecated(since = \"%s\")\n" - (get_release_branding (get_deprecated_release field.lifecycle.transitions)) ; - - fprintf file " public %s %s;\n\n" ty name - -let rec gen_record_namespace file prefix (name, contents) cls = - List.iter (gen_record_contents file (name :: prefix) cls) contents - -and gen_record_contents file prefix cls = function - | Field f -> - gen_record_field file prefix f cls - | Namespace (n, cs) -> - gen_record_namespace file prefix (n, cs) cls - -(***) - -let gen_record_tostring_field file prefix field = - let name = String.concat "_" (List.rev (field.field_name :: prefix)) in - let name = camel_case name in - fprintf file - " print.printf(\"%%1$20s: %%2$s\\n\", \"%s\", this.%s);\n" name - name - -let rec gen_record_tostring_namespace file prefix (name, contents) = - List.iter (gen_record_tostring_contents file (name :: prefix)) contents - -and gen_record_tostring_contents file prefix = function - | Field f -> - gen_record_tostring_field file prefix f - | Namespace (n, cs) -> - gen_record_tostring_namespace file prefix (n, cs) - -(***) - let field_default = function | SecretString | String -> {|""|} @@ -496,621 +267,554 @@ let field_default = function | Option _ -> "null" -let gen_record_tomap_field file prefix field = - let name = String.concat "_" (List.rev (field.field_name :: prefix)) in - let name' = camel_case name in - let default = field_default field.ty in - fprintf file " map.put(\"%s\", this.%s == null ? %s : this.%s);\n" - name name' default name' - -let rec gen_record_tomap_contents file prefix = function - | Field f -> - gen_record_tomap_field file prefix f - | Namespace (n, cs) -> - List.iter (gen_record_tomap_contents file (n :: prefix)) cs - -(*Generate the Record subclass for the given class, with its toString and toMap*) -(* methods. 
We're also modifying the records hash table as a side effect*) - -let gen_record file cls = - let class_name = class_case cls.name in - let _ = Hashtbl.replace records cls.name cls.contents in - let contents = cls.contents in - fprintf file " /**\n" ; - fprintf file " * Represents all the fields in a %s\n" class_name ; - fprintf file " */\n" ; - fprintf file " public static class Record implements Types.Record {\n" ; - fprintf file " public String toString() {\n" ; - fprintf file " StringWriter writer = new StringWriter();\n" ; - fprintf file " PrintWriter print = new PrintWriter(writer);\n" ; - - List.iter (gen_record_tostring_contents file []) contents ; - (*for the Event.Record, we have to add in the snapshot field by hand, because it's not in the data model!*) - if cls.name = "event" then - fprintf file - " print.printf(\"%%1$20s: %%2$s\\n\", \"snapshot\", \ - this.snapshot);\n" ; - - fprintf file " return writer.toString();\n" ; - fprintf file " }\n\n" ; - fprintf file " /**\n" ; - fprintf file " * Convert a %s.Record to a Map\n" cls.name ; - fprintf file " */\n" ; - fprintf file " public Map toMap() {\n" ; - fprintf file " var map = new HashMap();\n" ; - - List.iter (gen_record_tomap_contents file []) contents ; - if cls.name = "event" then - fprintf file " map.put(\"snapshot\", this.snapshot);\n" ; - - fprintf file " return map;\n" ; - fprintf file " }\n\n" ; - - List.iter (gen_record_contents file [] cls) contents ; - if cls.name = "event" then ( - fprintf file " /**\n" ; - fprintf file - " * The record of the database object that was added, changed or \ - deleted\n" ; - fprintf file - " * (the actual type will be VM.Record, VBD.Record or similar)\n" ; - fprintf file " */\n" ; - fprintf file " public Object snapshot;\n" - ) ; - - fprintf file " }\n\n" - -(* Generate the class *) - let class_is_empty cls = cls.contents = [] -let gen_class cls folder = - let class_name = class_case cls.name in - let methods = cls.messages in - let file = open_out (Filename.concat folder class_name ^ ".java") in - let publishInfo = get_published_info_class cls in - print_license file ; - fprintf file - {|package com.xensource.xenapi; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonValue; -import com.fasterxml.jackson.core.type.TypeReference; -import com.xensource.xenapi.Types.BadServerResponse; -import com.xensource.xenapi.Types.XenAPIException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.util.*; -import java.io.IOException; - -|} ; - fprintf file "/**\n" ; - fprintf file " * %s\n" cls.description ; - if not (publishInfo = "") then fprintf file " * %s\n" publishInfo ; - fprintf file " *\n" ; - fprintf file " * @author Cloud Software Group, Inc.\n" ; - fprintf file " */\n" ; - fprintf file "public class %s extends XenAPIObject {\n\n" class_name ; - - if class_is_empty cls then - fprintf file - " @JsonValue\n\ - \ public String toWireString() {\n\ - \ return null;\n\ - \ }\n\n" - else ( - fprintf file " /**\n" ; - fprintf file " * The XenAPI reference (OpaqueRef) to this object.\n" ; - fprintf file " */\n" ; - fprintf file " protected final String ref;\n\n" ; - fprintf file " /**\n" ; - fprintf file " * For internal use only.\n" ; - fprintf file " */\n" ; - fprintf file " %s(String ref) {\n" class_name ; - fprintf file " this.ref = ref;\n" ; - fprintf file " }\n\n" ; - fprintf file " /**\n" ; - fprintf file - " * @return The XenAPI reference (OpaqueRef) to this object.\n" ; - fprintf file " */\n" ; - fprintf file " 
@JsonValue\n" ; - fprintf file " public String toWireString() {\n" ; - fprintf file " return this.ref;\n" ; - fprintf file " }\n\n" - ) ; - - if not (class_is_empty cls) then ( - fprintf file " /**\n" ; - fprintf file - " * If obj is a %s, compares XenAPI references for equality.\n" - class_name ; - fprintf file " */\n" ; - fprintf file " @Override\n" ; - fprintf file " public boolean equals(Object obj)\n" ; - fprintf file " {\n" ; - fprintf file " if (obj instanceof %s)\n" class_name ; - fprintf file " {\n" ; - fprintf file " %s other = (%s) obj;\n" class_name class_name ; - fprintf file " return other.ref.equals(this.ref);\n" ; - fprintf file " } else\n" ; - fprintf file " {\n" ; - fprintf file " return false;\n" ; - fprintf file " }\n" ; - fprintf file " }\n\n" ; - - (*hashcode*) - fprintf file " @Override\n" ; - fprintf file " public int hashCode()\n" ; - fprintf file " {\n" ; - fprintf file " return ref.hashCode();\n" ; - fprintf file " }\n\n" ; - flush file ; - gen_record file cls ; - flush file - ) ; - - List.iter (gen_method_and_asynchronous_counterpart file cls) methods ; - - flush file ; - fprintf file "}" ; - close_out file - -(**?*) -(* Generate Marshalling Class *) - -(*This generates the special case code for marshalling the snapshot field in an Event.Record*) -let generate_snapshot_hack file = - fprintf file "\n" ; - fprintf file "\n" ; - fprintf file " Object a,b;\n" ; - fprintf file " a=map.get(\"snapshot\");\n" ; - fprintf file " switch(%s(record.clazz))\n" - (get_marshall_function switch_enum) ; - fprintf file " {\n" ; - List.iter - (fun x -> - fprintf file " case %17s: b = %25s(a); break;\n" - (String.uppercase_ascii x) - (get_marshall_function (Record x)) - ) - (List.map - (fun x -> x.name) - (List.filter (fun x -> not (class_is_empty x)) classes) - ) ; - fprintf file - " default: throw new RuntimeException(\"Internal error in \ - auto-generated code whilst unmarshalling event snapshot\");\n" ; - fprintf file " }\n" ; - fprintf file " record.snapshot = b;\n" +let generate_snapshot_hack = + {| + Object a,b; + a = map.get("snapshot"); + switch(|} + ^ get_marshall_function switch_enum + ^ {|(record.clazz)){ +|} + ^ String.concat "\n" + (List.map + (fun x -> + " case " + ^ String.uppercase_ascii x + ^ ": b = " + ^ get_marshall_function (Record x) + ^ "(a); break;" + ) + (List.map + (fun x -> x.name) + (List.filter (fun x -> not (class_is_empty x)) classes) + ) + ) + ^ {| + default: + throw new RuntimeException("Internal error in auto-generated code whilst unmarshalling event snapshot"); + } + record.snapshot = b;|} -let gen_marshall_record_field file prefix field = +let gen_marshall_record_field prefix field = let ty = get_marshall_function field.ty in let name = String.concat "_" (List.rev (field.field_name :: prefix)) in let name' = camel_case name in - fprintf file " record.%s = %s(map.get(\"%s\"));\n" name' ty name + " record." 
^ name' ^ " = " ^ ty ^ "(map.get(\"" ^ name ^ "\"));" -let rec gen_marshall_record_namespace file prefix (name, contents) = - List.iter (gen_marshall_record_contents file (name :: prefix)) contents +let rec gen_marshall_record_namespace prefix (name, contents) = + String.concat "\n" + (List.map (gen_marshall_record_contents (name :: prefix)) contents) -and gen_marshall_record_contents file prefix = function +and gen_marshall_record_contents prefix = function | Field f -> - gen_marshall_record_field file prefix f + gen_marshall_record_field prefix f | Namespace (n, cs) -> - gen_marshall_record_namespace file prefix (n, cs) ; - () - -(*Every type which may be returned by a function may also be the result of the*) -(* corresponding asynchronous task. We therefore need to generate corresponding*) -(* marshalling functions which can take the raw xml of the tasks result field*) -(* and turn it into the corresponding type. Luckily, the only things returned by*) -(* asynchronous tasks are object references and strings, so rather than implementing*) -(* the general recursive structure we'll just make one for each of the classes*) -(* that's been registered as a marshall-needing type*) - -let generate_reference_task_result_func file clstr = - fprintf file - {| /** - * Attempt to convert the {@link Task}'s result to a {@link %s} object. - * Will return null if the method cannot fetch a valid value from the {@link Task} object. - * @param task The task from which to fetch the result. - * @param connection The connection - * @return the instantiated object if a valid value was found, null otherwise. - * @throws BadServerResponse Thrown if the response from the server contains an invalid status. - * @throws XenAPIException if the call failed. - * @throws IOException if an error occurs during a send or receive. This includes cases where a payload is invalid JSON. - */ -|} - clstr ; - fprintf file - " public static %s to%s(Task task, Connection connection) throws \ - IOException {\n" - clstr clstr ; - fprintf file - " return Types.to%s(parseResult(task.getResult(connection)));\n" - clstr ; - fprintf file " }\n" ; - fprintf file "\n" - -let gen_task_result_func file = function - | Ref ty -> - generate_reference_task_result_func file (class_case ty) - | _ -> - () + gen_marshall_record_namespace prefix (n, cs) (*don't generate for complicated types. They're not needed.*) -let rec gen_marshall_body file = function +let rec gen_marshall_body = function | SecretString | String -> - fprintf file " return (String) object;\n" + "return (String) object;" | Int -> - fprintf file " return Long.valueOf((String) object);\n" + "return Long.valueOf((String) object);" | Float -> - fprintf file " return (Double) object;\n" + "return (Double) object;" | Bool -> - fprintf file " return (Boolean) object;\n" + "return (Boolean) object;" | DateTime -> - fprintf file - " try {\n\ - \ return (Date) object;\n\ - \ } catch (ClassCastException e){\n\ - \ //Occasionally the date comes back as an ocaml float \ - rather than\n\ - \ //in the xmlrpc format! Catch this and convert.\n\ - \ return (new Date((long) (1000*Double.parseDouble((String) \ - object))));\n\ - \ }\n" + {| + try { + return (Date) object; + } catch (ClassCastException e){ + //Occasionally the date comes back as an ocaml float rather than + //in the xmlrpc format! Catch this and convert. 
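+          //The float is a number of seconds since the epoch, so it is
+          //multiplied by 1000 to give the milliseconds expected by Date.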
+ return (new Date((long) (1000*Double.parseDouble((String) object)))); + }|} | Ref ty -> - fprintf file " return new %s((String) object);\n" (class_case ty) + "return new " ^ class_case ty ^ "((String) object);" | Enum (name, _) -> - fprintf file " try {\n" ; - fprintf file - " return %s.valueOf(((String) \ - object).toUpperCase().replace('-','_'));\n" - (class_case name) ; - fprintf file " } catch (IllegalArgumentException ex) {\n" ; - fprintf file " return %s.UNRECOGNIZED;\n" (class_case name) ; - fprintf file " }\n" + {|try { + return |} + ^ class_case name + ^ {|.valueOf(((String) object).toUpperCase().replace('-','_')); + } catch (IllegalArgumentException ex) { + return |} + ^ class_case name + ^ {|.UNRECOGNIZED; + }|} | Set ty -> let ty_name = get_java_type ty in let marshall_fn = get_marshall_function ty in - fprintf file " Object[] items = (Object[]) object;\n" ; - fprintf file " Set<%s> result = new LinkedHashSet<>();\n" ty_name ; - fprintf file " for(Object item: items) {\n" ; - fprintf file " %s typed = %s(item);\n" ty_name marshall_fn ; - fprintf file " result.add(typed);\n" ; - fprintf file " }\n" ; - fprintf file " return result;\n" + {|Object[] items = (Object[]) object; + Set<|} + ^ ty_name + ^ {|> result = new LinkedHashSet<>(); + for(Object item: items) { + |} + ^ ty_name + ^ {| typed = |} + ^ marshall_fn + ^ {|(item); + result.add(typed); + } + return result;|} | Map (ty, ty') -> let ty_name = get_java_type ty in let ty_name' = get_java_type ty' in let marshall_fn = get_marshall_function ty in let marshall_fn' = get_marshall_function ty' in - fprintf file " var map = (Map)object;\n" ; - fprintf file " var result = new HashMap<%s,%s>();\n" ty_name - ty_name' ; - fprintf file " for(var entry: map.entrySet()) {\n" ; - fprintf file " var key = %s(entry.getKey());\n" marshall_fn ; - fprintf file " var value = %s(entry.getValue());\n" - marshall_fn' ; - fprintf file " result.put(key, value);\n" ; - fprintf file " }\n" ; - fprintf file " return result;\n" + {|var map = (Map)object; + var result = new HashMap<|} + ^ ty_name + ^ {|,|} + ^ ty_name' + ^ {|>(); + for(var entry: map.entrySet()) { + var key = |} + ^ marshall_fn + ^ {|(entry.getKey()); + var value = |} + ^ marshall_fn' + ^ {|(entry.getValue()); + result.put(key, value); + } + return result;|} | Record ty -> let contents = Hashtbl.find records ty in let cls_name = class_case ty in - fprintf file - " Map map = (Map) object;\n" ; - fprintf file " %s.Record record = new %s.Record();\n" cls_name - cls_name ; - List.iter (gen_marshall_record_contents file []) contents ; - (*Event.Record needs a special case to handle snapshots*) - if ty = "event" then generate_snapshot_hack file ; - fprintf file " return record;\n" + "Map map = (Map) object;\n" + ^ " " + ^ cls_name + ^ {|.Record record = new |} + ^ cls_name + ^ ".Record();\n" + ^ String.concat "\n" (List.map (gen_marshall_record_contents []) contents) + ^ ( if + (*Event.Record needs a special case to handle snapshots*) + ty = "event" + then + generate_snapshot_hack + else + "" + ) + ^ " \n return record;" | Option ty -> - gen_marshall_body file ty - -let rec gen_marshall_func file ty = - match ty with - | Option x -> - if TypeSet.mem x !types then - () - else - gen_marshall_func file ty - | _ -> - let type_string = get_java_type ty in - fprintf file - {| /** - * Converts an {@link Object} to a {@link %s} object. - *
- * This method takes an {@link Object} as input and attempts to convert it into a {@link %s} object. - * If the input object is null, the method returns null. Otherwise, it creates a new {@link %s} - * object using the input object's {@link String} representation. - *
- * @param object The {@link Object} to be converted to a {@link %s} object. - * @return A {@link %s} object created from the input {@link Object}'s {@link String} representation, - * or null if the input object is null. - * @deprecated this method will not be publicly exposed in future releases of this package. - */ - @Deprecated -|} - type_string type_string type_string type_string type_string ; - let fn_name = get_marshall_function ty in - - if match ty with Map _ | Record _ -> true | _ -> false then - fprintf file " @SuppressWarnings(\"unchecked\")\n" ; - - fprintf file " public static %s %s(Object object) {\n" type_string - fn_name ; - fprintf file " if (object == null) {\n" ; - fprintf file " return null;\n" ; - fprintf file " }\n" ; - gen_marshall_body file ty ; - fprintf file " }\n\n" -(***) - -let gen_enum file name ls = - let name = class_case name in - let ls = - ("UNRECOGNIZED", "The value does not belong to this enumeration") :: ls - in - fprintf file " public enum %s {\n" name ; - let to_member_declaration (name, description) = - let escaped_description = - global_replace (regexp_string "*/") "* /" description - in - let final_description = - global_replace (regexp_string "\n") "\n * " escaped_description - in - let comment = - String.concat "\n" - [" /**"; " * " ^ final_description; " */"] - in - let json_property = - if name != "UNRECOGNIZED" then - {|@JsonProperty("|} ^ name ^ {|")|} - else - "@JsonEnumDefaultValue" - in - comment ^ "\n " ^ json_property ^ "\n " ^ enum_of_wire name - in - fprintf file "%s" (String.concat ",\n" (List.map to_member_declaration ls)) ; - fprintf file ";\n" ; - fprintf file " public String toString() {\n" ; - List.iter - (fun (enum, _) -> - fprintf file " if (this == %s) return \"%s\";\n" - (enum_of_wire enum) enum - ) - ls ; - fprintf file " /* This can never be reached */\n" ; - fprintf file " return \"UNRECOGNIZED\";\n" ; - fprintf file " }\n" ; - fprintf file "\n }\n\n" - -let gen_enums file = Hashtbl.iter (gen_enum file) enums + gen_marshall_body ty let gen_error_field_name field = camel_case (String.concat "_" (Astring.String.cuts ~sep:" " field)) -let gen_error_field_names fields = List.map gen_error_field_name fields - -let gen_error_fields file field = - fprintf file " public final String %s;\n" field +let populate_releases templdir class_dir = + render_file + ("APIVersion.mustache", "APIVersion.java") + json_releases templdir class_dir -let gen_error file name params = - let name = exception_class_case name in - let fields = gen_error_field_names params.err_params in - let constructor_params = - String.concat ", " (List.map (fun field -> "String " ^ field) fields) +(****************************************************) +(* Populate JSON object for the Types.java template *) +(****************************************************) +let get_types_errors_json = + let list_errors = + Hashtbl.fold (fun k v acc -> (k, v) :: acc) Datamodel.errors [] in + List.map + (fun (_, error) -> + let class_name = exception_class_case error.err_name in + let err_params = + List.mapi + (fun index value -> + `O + [ + ("name", `String (gen_error_field_name value)) + ; ("index", `Float (Int.to_float (index + 1))) + ; ("last", `Bool (index == List.length error.err_params - 1)) + ] + ) + error.err_params + in + `O + [ + ("name", `String error.err_name) + ; ("description", `String (escape_xml error.err_doc)) + ; ("class_name", `String class_name) + ; ("err_params", `A err_params) + ] + ) + list_errors + |> List.rev + +let get_types_enums_json types = + List.map 
+ (fun (_, enum_name, enum_values) -> + let class_name = class_case enum_name in + let mapped_values = + List.map + (fun (name, description) -> + let escaped_description = + global_replace (regexp_string "*/") "* /" description + in + let final_description = + global_replace (regexp_string "\n") "\n * " + escaped_description + in + `O + [ + ("name", `String name) + ; ("name_uppercase", `String (enum_of_wire name)) + ; ("description", `String final_description) + ] + ) + enum_values + in + let mapped_values_with_is_last = set_is_last mapped_values [] in + `O + [ + ("class_name", `String class_name) + ; ("values", `A mapped_values_with_is_last) + ] + ) + types + +let get_types_json types = + List.map + (fun t -> + let type_string = get_java_type t in + let class_name = class_case type_string in + let method_name = get_marshall_function t in + (*Every type which may be returned by a function may also be the result of the*) + (* corresponding asynchronous task. We therefore need to generate corresponding*) + (* marshalling functions which can take the raw xml of the tasks result field*) + (* and turn it into the corresponding type. Luckily, the only things returned by*) + (* asynchronous tasks are object references and strings, so rather than implementing*) + (* the general recursive structure we'll just make one for each of the classes*) + (* that's been registered as a marshall-needing type*) + let generate_reference_task_result_func = + match t with Ref _ -> true | _ -> false + in + `O + [ + ("name", `String type_string) + ; ("class_name", `String class_name) + ; ("method_name", `String method_name) + ; ( "suppress_unchecked_warning" + , `Bool + ( match t with + | Map _ | Record _ | Option (Record _) | Option (Map _) -> + true + | _ -> + false + ) + ) + ; ( "generate_reference_task_result_func" + , `Bool generate_reference_task_result_func + ) + ; ("method_body", `String (gen_marshall_body t)) + ] + ) + types + +let populate_types types templdir class_dir = + (* we manually add switch_enum here so it's added as an enum in Types.java *) + let list_types = TypeSet.fold (fun t acc -> t :: acc) !types [switch_enum] in + let sort_types ty1 ty2 = Ty.compare ty1 ty2 in + let list_sorted_types = List.sort sort_types list_types in + let list_sorted_enums = + List.filter_map + (fun x -> match x with Enum (name, ls) -> Some (x, name, ls) | _ -> None) + list_sorted_types + in + let types_json = get_types_json list_sorted_types in + let errors = get_types_errors_json in + let enums = get_types_enums_json list_sorted_enums in + let json = + `O [("errors", `A errors); ("enums", `A enums); ("types", `A types_json)] + in + render_file ("Types.mustache", "Types.java") json templdir class_dir + +(***************************************) +(* Helpers for generating class methods *) +(***************************************) +let get_message_return_type cls message is_method_async = + if is_method_async then + "Task" + else if + String.lowercase_ascii cls.name = "event" + && String.lowercase_ascii message.msg_name = "from" + then + "EventBatch" + else + get_java_type_or_void message.msg_result - fprintf file " /**\n" ; - fprintf file " * %s\n" (escape_xml params.err_doc) ; - fprintf file " */\n" ; - fprintf file " public static class %s extends XenAPIException {\n" name ; - - List.iter (gen_error_fields file) fields ; - - fprintf file "\n /**\n" ; - fprintf file " * Create a new %s\n" name ; - fprintf file " */\n" ; - fprintf file " public %s(%s) {\n" name constructor_params ; - fprintf file " super(\"%s\");\n" 
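To make the data flow concrete: each entry produced by get_types_json becomes one {{#types}} section when Types.mustache is rendered. A hedged sketch of the entry for a VM reference follows; the "toVM" name and the backtick constructors (`O, `A, `String, `Bool) are assumptions taken from how the surrounding code builds its Mustache JSON:

```ocaml
(* Hypothetical entry for Ref "VM"; values mirror the helpers above. *)
let _vm_ref_entry =
  `O
    [ ("name", `String "VM")
    ; ("class_name", `String "VM")
    ; ("method_name", `String "toVM")
    ; ("suppress_unchecked_warning", `Bool false)
    ; ("generate_reference_task_result_func", `Bool true)
    ; ("method_body", `String "return new VM((String) object);")
    ]
```

Because it is a Ref, generate_reference_task_result_func is true, which makes the template also emit the toVM(Task, Connection) convenience method.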
(escape_xml params.err_doc) ; +let get_message_return_description message = + match message.msg_result with + | None -> + get_java_type_or_void message.msg_result + | Some (_, description) -> + description - List.iter (fun s -> fprintf file " this.%s = %s;\n" s s) fields ; +let get_message_return_parameters message = + List.map + (fun parameter -> + `O [("name_camel", `String (camel_case parameter.param_name))] + ) + (List.filter + (function {param_type= Record _; _} -> true | _ -> false) + message.msg_params + ) - fprintf file " }\n\n" ; - fprintf file " }\n\n" +let get_message_deprecation_info message = + let is_deprecated = + match message.msg_release.internal_deprecated_since with + | Some _ -> + true + | None -> + false + in + let deprecated_release = + match get_method_deprecated_release_name message with + | Some v -> + get_release_branding v + | None -> + "" + in + (is_deprecated, deprecated_release) -let gen_method_error_throw file name error = - let class_name = exception_class_case name in - let paramsStr = - String.concat ", " - (List.map - (fun i -> sprintf "p%i" i) - (range (List.length error.err_params)) +let get_message_type_reference is_method_async message return_type = + if is_method_async then + "Task" + else if message.msg_result != None then + return_type + else + "" + +let get_message_formatted_parameters parameters message = + List.map + (fun parameter -> + let publish_info = get_published_info_param message parameter in + let name_camel = camel_case parameter.param_name in + let description = escape_xml parameter.param_doc in + `O + [ + ("type", `String (get_java_type parameter.param_type)) + ; ( "is_record" + , `Bool + (match parameter.param_type with Record _ -> true | _ -> false) + ) + ; ("name_camel", `String name_camel) + ; ( "description" + , `String (if description = "" then "No description" else description) + ) + ; ("publish_info", `String publish_info) + ] + ) + parameters + +let get_message_errors message = + let error_definitions = + List.map + (fun error -> + let exception_name = exception_class_case error.err_name in + ("Types." ^ exception_name, escape_xml error.err_doc) ) + message.msg_errors in - - fprintf file " if (errorName.equals(\"%s\")){\n" name ; - - (* Prepare the parameters to the Exception constructor *) - List.iter - (fun i -> - fprintf file - " String p%i = errorData.length > %i ? errorData[%i] : \"\";\n" - i i i + List.map + (fun (name, description) -> + `O [("name", `String name); ("description", `String description)] ) - (range (List.length error.err_params)) ; - - fprintf file " throw new Types.%s(%s);\n" class_name paramsStr ; - fprintf file " }\n" - -let gen_types_class folder = - let class_name = "Types" in - let file = open_out (Filename.concat folder class_name ^ ".java") in - print_license file ; - fprintf file - {|package com.xensource.xenapi; -import java.util.*; -import com.fasterxml.jackson.annotation.JsonEnumDefaultValue; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.io.IOException; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * This class holds enum types and exceptions. 
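The helpers above reduce several special cases to per-message metadata; two worth calling out are that asynchronous variants always return Task, and that event.from is mapped by hand to the EventBatch wrapper. A small illustrative table in code form (the VM messages are examples, not an exhaustive mapping):

```ocaml
(* (class, message, is_async) -> Java return type, as computed by
   get_message_return_type above. *)
let _return_type_examples =
  [ ("VM", "start", true, "Task") (* async variants return Task *)
  ; ("event", "from", false, "EventBatch") (* hand-mapped wrapper *)
  ; ("VM", "get_uuid", false, "String") (* via get_java_type_or_void *)
  ]
```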
- */ -public class Types -{ - /** - * Interface for all Record classes - */ - public interface Record - { - /** - * Convert a Record to a Map - */ - Map toMap(); - } - /** - * Base class for all XenAPI Exceptions - */ - public static class XenAPIException extends IOException { - public final String shortDescription; - public final String[] errorDescription; - XenAPIException(String shortDescription) - { - this.shortDescription = shortDescription; - this.errorDescription = null; - } - XenAPIException(String[] errorDescription) - { - this.errorDescription = errorDescription; - if (errorDescription.length > 0) - { - shortDescription = errorDescription[0]; - } else - { - shortDescription = ""; - } - } - public String toString() - { - if (errorDescription == null) - { - return shortDescription; - } else if (errorDescription.length == 0) - { - return ""; - } - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < errorDescription.length - 1; i++) - { - sb.append(errorDescription[i]); - } - sb.append(errorDescription[errorDescription.length - 1]); - return sb.toString(); - } - } - - /** - * Thrown if the response from the server contains an invalid status. - */ - public static class BadServerResponse extends XenAPIException - { - public BadServerResponse(JsonRpcResponseError responseError) - { - super(String.valueOf(responseError)); - } - } -|} ; - - fprintf file - {| /** - * Checks the provided server response was successful. If the call - * failed, throws a XenAPIException. If the server - * returned an invalid response, throws a BadServerResponse. - * Otherwise, returns the server response as passed in. - */ - public static void checkError(JsonRpcResponseError response) throws XenAPIException, BadServerResponse - { - var errorData = response.data; - if(errorData.length == 0){ - throw new BadServerResponse(response); - } - var errorName = response.message; -|} ; - - Hashtbl.iter (gen_method_error_throw file) Datamodel.errors ; - - fprintf file - {| - // An unknown error occurred - throw new Types.XenAPIException(errorData); -} - -|} ; - - gen_enums file ; - fprintf file "\n" ; - Hashtbl.iter (gen_error file) Datamodel.errors ; - fprintf file "\n" ; - TypeSet.iter (gen_marshall_func file) !types ; - fprintf file "\n" ; - TypeSet.iter (gen_task_result_func file) !types ; - fprintf file - {| - - public static class BadAsyncResult extends XenAPIException - { - public final String result; - - public BadAsyncResult(String result) - { - super(result); - this.result = result; - } - } - - private static String parseResult(String result) throws BadAsyncResult - { - Pattern pattern = Pattern.compile("(.*)"); - Matcher matcher = pattern.matcher(result); - if (!matcher.find() || matcher.groupCount() != 1) { - throw new Types.BadAsyncResult("Can't interpret: " + result); - } - - return matcher.group(1); - } + error_definitions + +let get_message_method_parameters parameters is_static message = + let session_parameter = + `O + [ + ("type", `String "String") + ; ("is_record", `Bool false) + ; ("name_camel", `String "sessionReference") + ; ("description", `String "") + ; ("publish_info", `String "") + ] + in + let non_static_reference_parameter = + `O + [ + ("type", `String "String") + ; ("is_record", `Bool false) + ; ("name_camel", `String "this.ref") + ; ("description", `String "") + ; ("publish_info", `String "") + ] + in + let extra_method_parameters = + match (message.msg_session, is_static) with + | true, true -> + [session_parameter] + | true, false -> + [session_parameter; 
non_static_reference_parameter] + | false, true -> + [] + | false, false -> + [non_static_reference_parameter] + in + set_is_last (extra_method_parameters @ parameters) [] + +let get_class_message_json cls message async_version params = + let is_method_async = async_version in + let return_type = get_message_return_type cls message is_method_async in + let return_description = get_message_return_description message in + let returns_void = message.msg_result = None && not async_version in + let record_parameters = get_message_return_parameters message in + let is_deprecated, deprecated_release = + get_message_deprecation_info message + in + let type_reference = + get_message_type_reference is_method_async message return_type + in + let parameters = get_message_formatted_parameters params message in + let errors = get_message_errors message in + let is_static = is_method_static message in + let method_parameters = + get_message_method_parameters parameters is_static message + in + `O + [ + ("return_type", `String return_type) + ; ("is_async", `Bool async_version) + ; ("return_description", `String return_description) + ; ("returns_void", `Bool returns_void) + ; ("is_static", `Bool is_static) + ; ("name_camel", `String (camel_case message.msg_name)) + ; ("name", `String message.msg_name) + ; ("publish_info", `String (get_published_info_message message cls)) + ; ("description", `String (escape_xml message.msg_doc)) + ; ("minimum_allowed_role", `String (get_minimum_allowed_role message)) + ; ("object_name", `String message.msg_obj_name) + ; ("supports_session", `Bool message.msg_session) + ; ("record_parameters", `A record_parameters) + ; ("is_deprecated", `Bool is_deprecated) + ; ("deprecated_release", `String deprecated_release) + ; ("type_reference", `String type_reference) + ; ("parameters", `A parameters) + ; ("method_parameters", `A method_parameters) + ; ("errors", `A errors) + ] - public static EventBatch toEventBatch(Object object) { - if (object == null) { - return null; - } - Map map = (Map) object; - EventBatch batch = new EventBatch(); - batch.token = toString(map.get("token")); - batch.validRefCounts = map.get("valid_ref_counts"); - batch.events = toSetOfEventRecord(map.get("events")); - return batch; - } -} -|} +let get_class_fields_json cls = + Hashtbl.replace records cls.name cls.contents ; + let rec content_fields content namespace_name = + match content with + | Field f -> + let name_with_prefix = + if namespace_name == "" then + f.field_name + else + namespace_name ^ "_" ^ f.field_name + in + let name_camel = camel_case name_with_prefix in + let ty = get_java_type f.ty in + let publish_info = get_published_info_field f cls in + let description = escape_xml f.field_description in + let is_deprecated = f.lifecycle.state = Lifecycle.Deprecated_s in + let deprecated_release = + if is_deprecated then + get_release_branding (get_deprecated_release f.lifecycle.transitions) + else + "" + in + [ + `O + [ + ("name", `String name_with_prefix) + ; ("name_camel", `String name_camel) + ; ("default_value", `String (field_default f.ty)) + ; ("description", `String description) + ; ("type", `String ty) + ; ("publish_info", `String publish_info) + ; ("is_deprecated", `Bool is_deprecated) + ; ("deprecated_release", `String deprecated_release) + ] + ] + | Namespace (name, contents) -> + List.flatten (List.map (fun c -> content_fields c name) contents) + in + List.flatten (List.map (fun c -> content_fields c "") cls.contents) + +(** [get_all_message_variants messages acc] takes a list of 
messages [messages] and an accumulator [acc], + and recursively constructs a list of tuples representing both asynchronous and synchronous variants of each message, + along with their associated parameters. If a message does not have an asynchronous version, this function simply returns + its synchronous version, with parameter information. + + For each message, if it has parameter information, the function generates all possible combinations of parameters + and pairs them with the message, marking each combination as either asynchronous or synchronous. Then, it constructs + a list of tuples containing each combination along with its associated message and its asynchronous/synchronous flag. + + @param messages a list of messages to process + @param acc an accumulator for collecting the constructed tuples + @return a list of tuples representing both asynchronous and synchronous variants of each message, + along with their associated parameters *) +let rec get_all_message_variants messages acc = + match messages with + | [] -> + acc + | h :: tail -> + let get_variants messages = + (* we get the param groups outside of the mapping because we know it's always the same message *) + let params = gen_param_groups h h.msg_params in + match params with + | [] -> + List.map + (fun (message, is_async) -> (message, is_async, [])) + messages + | _ -> + List.map + (fun (message, is_async) -> + List.map (fun param -> (message, is_async, param)) params + ) + messages + |> List.flatten + in + if h.msg_async then + get_variants [(h, false); (h, true)] @ get_all_message_variants tail acc + else + get_variants [(h, false)] @ get_all_message_variants tail acc -(* Now run it *) +let get_class_methods_json cls = + let messages = get_all_message_variants cls.messages [] in + List.map + (fun (message, async_version, params) -> + get_class_message_json cls message async_version params + ) + messages -let populate_releases templdir class_dir = - render_file - ("APIVersion.mustache", "APIVersion.java") - json_releases templdir class_dir +(***********************************************) +(* Populate JSON object for the class template *) +(***********************************************) +let populate_class cls templdir class_dir = + let class_name = class_case cls.name in + let fields = get_class_fields_json cls in + let methods = get_class_methods_json cls in + let json = + `O + [ + ("class_name", `String class_name) + ; ("description", `String cls.description) + ; ("publish_info", `String (get_published_info_class cls)) + ; ("is_empty_class", `Bool (class_is_empty cls)) + ; ("is_event_class", `Bool (cls.name = "event")) + ; ("fields", `A fields) + ; ("methods", `A methods) + ] + in + render_file ("Class.mustache", class_name ^ ".java") json templdir class_dir let _ = let templdir = "templates" in let class_dir = "autogen/xen-api/src/main/java/com/xensource/xenapi" in - List.iter (fun x -> gen_class x class_dir) classes ; - gen_types_class class_dir ; populate_releases templdir class_dir ; + List.iter (fun cls -> populate_class cls templdir class_dir) classes ; + populate_types types templdir class_dir ; let uncommented_license = string_of_file "LICENSE" in let class_license = open_out "autogen/xen-api/src/main/resources/LICENSE" in diff --git a/ocaml/sdk-gen/java/templates/Class.mustache b/ocaml/sdk-gen/java/templates/Class.mustache new file mode 100644 index 00000000000..658deeb05f2 --- /dev/null +++ b/ocaml/sdk-gen/java/templates/Class.mustache @@ -0,0 +1,177 @@ +/* + * Copyright (c) Cloud Software Group, Inc. 
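As a worked example of the expansion implemented by get_all_message_variants: an asynchronous message with two parameter groups yields four (message, is_async, params) tuples. In this self-contained model, strings stand in for the real message and parameter records:

```ocaml
(* Hedged, simplified model of the variant expansion above. *)
let _demo =
  let msg = "VM.start" and param_groups = ["minimal"; "full"] in
  List.concat_map
    (fun (m, is_async) ->
      List.map (fun params -> (m, is_async, params)) param_groups
    )
    [(msg, false); (msg, true)]
(* = [ ("VM.start", false, "minimal"); ("VM.start", false, "full")
     ; ("VM.start", true, "minimal"); ("VM.start", true, "full") ] *)
```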
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1) Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +package com.xensource.xenapi; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import com.fasterxml.jackson.core.type.TypeReference; +import com.xensource.xenapi.Types.BadServerResponse; +import com.xensource.xenapi.Types.XenAPIException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.*; +import java.io.IOException; + +/** + * {{description}}{{#publish_info}} + * {{{publish_info}}}{{/publish_info}} + * + * @author Cloud Software Group, Inc. + */ +public class {{class_name}} extends XenAPIObject { + + {{#is_empty_class}} + @JsonValue + public String toWireString() { + return null; + } + + {{/is_empty_class}} + {{^is_empty_class}} + /** + * The XenAPI reference (OpaqueRef) to this object. + */ + protected final String ref; + + /** + * For internal use only. + */ + {{{class_name}}}(String ref) { + this.ref = ref; + } + + /** + * @return The XenAPI reference (OpaqueRef) to this object. + */ + @JsonValue + public String toWireString() { + return this.ref; + } + + /** + * If obj is a {{{class_name}}}, compares XenAPI references for equality. + */ + @Override + public boolean equals(Object obj) + { + if (obj instanceof {{{class_name}}}) + { + {{{class_name}}} other = ({{{class_name}}}) obj; + return other.ref.equals(this.ref); + } else + { + return false; + } + } + + @Override + public int hashCode() + { + return ref.hashCode(); + } + + /** + * Represents all the fields in a {{{class_name}}} + */ + public static class Record implements Types.Record { + public String toString() { + StringWriter writer = new StringWriter(); + PrintWriter print = new PrintWriter(writer); + {{#fields}} + print.printf("%1$20s: %2$s\n", "{{{name_camel}}}", this.{{{name_camel}}}); + {{/fields}} + {{#is_event_class}} + print.printf("%1$20s: %2$s\n", "snapshot", this.snapshot); + {{/is_event_class}} + return writer.toString(); + } + + /** + * Convert a {{{class_name}}}.Record to a Map + */ + public Map toMap() { + var map = new HashMap(); + {{#fields}} + map.put("{{{name}}}", this.{{{name_camel}}} == null ? 
{{{default_value}}} : this.{{{name_camel}}}); + {{/fields}} + return map; + } + + {{#fields}} + /** + * {{{description}}}{{#publish_info}} + * {{{publish_info}}}{{/publish_info}} + */ + @JsonProperty("{{{name}}}"){{#is_deprecated}} + @Deprecated(since = "{{{deprecated_release}}}"){{/is_deprecated}} + public {{{type}}} {{{name_camel}}}; + + {{/fields}} + {{#is_event_class}} + /** + * The record of the database object that was added, changed or deleted. + * The actual type will be VM.Record, VBD.Record, or similar. + */ + public Object snapshot; + {{/is_event_class}} + } + + {{/is_empty_class}} + {{#methods}} + /** + * {{{description}}} + * Minimum allowed role: {{{minimum_allowed_role}}} + * {{{publish_info}}}{{#is_deprecated}} + * @deprecated since {{{deprecated_release}}}{{/is_deprecated}} + * + * @param c The connection the call is made on{{#parameters}} + * @param {{{name_camel}}} {{{description}}} {{{publish_info}}}{{/parameters}}{{^returns_void}} + * @return {{#is_async}}Task{{/is_async}}{{^is_async}}{{{return_description}}}{{/is_async}}{{/returns_void}} + * @throws BadServerResponse Thrown if the response from the server contains an invalid status. + * @throws XenAPIException if the call failed. + * @throws IOException if an error occurs during a send or receive. This includes cases where a payload is invalid JSON.{{#errors}} + * @throws {{{name}}} {{{description}}}{{/errors}} + */{{#is_deprecated}} + @Deprecated(since = "{{{deprecated_release}}}"){{/is_deprecated}} + public{{#is_static}} static{{/is_static}} {{#is_async}}Task{{/is_async}}{{^is_async}}{{{return_type}}}{{/is_async}} {{name_camel}}{{#is_async}}Async{{/is_async}}(Connection c{{#parameters}}, {{{type}}} {{{name_camel}}}{{/parameters}}) throws + BadServerResponse, + XenAPIException, + IOException{{#errors}}, + {{name}}{{/errors}} { + String methodCall = "{{#is_async}}Async.{{/is_async}}{{{object_name}}}.{{{name}}}";{{#supports_session}} + String sessionReference = c.getSessionReference();{{/supports_session}}{{#method_parameters}}{{#is_record}} + var {{{name_camel}}}_map = {{{name_camel}}}.toMap();{{/is_record}}{{/method_parameters}} + Object[] methodParameters = { {{#method_parameters}}{{{name_camel}}}{{#is_record}}_map{{/is_record}}{{^is_last}}, {{/is_last}}{{/method_parameters}} };{{#type_reference}} + var typeReference = new TypeReference<{{{.}}}>(){};{{/type_reference}} + {{^returns_void}}return {{/returns_void}}c.dispatch(methodCall, methodParameters{{#type_reference}}, typeReference{{/type_reference}}); + } + + {{/methods}} +} \ No newline at end of file diff --git a/ocaml/sdk-gen/java/templates/Types.mustache b/ocaml/sdk-gen/java/templates/Types.mustache new file mode 100644 index 00000000000..4da97c774cd --- /dev/null +++ b/ocaml/sdk-gen/java/templates/Types.mustache @@ -0,0 +1,253 @@ +/* + * Copyright (c) Cloud Software Group, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1) Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
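For orientation, the {{#fields}} sections in Class.mustache above iterate over entries shaped as below. This is a hedged example for a hypothetical name_label field; default_value and publish_info come from helpers (field_default, get_published_info_field) whose output is assumed here:

```ocaml
(* Illustrative field entry as built by get_class_fields_json. *)
let _example_field =
  `O
    [ ("name", `String "name_label")
    ; ("name_camel", `String "nameLabel")
    ; ("default_value", `String "\"\"")
    ; ("description", `String "a human-readable name")
    ; ("type", `String "String")
    ; ("publish_info", `String "")
    ; ("is_deprecated", `Bool false)
    ; ("deprecated_release", `String "")
    ]
```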
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package com.xensource.xenapi; +import java.util.*; +import com.fasterxml.jackson.annotation.JsonEnumDefaultValue; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * This class holds enum types and exceptions. + */ +public class Types +{ + /** + * Interface for all Record classes + */ + public interface Record + { + /** + * Convert a Record to a Map + */ + Map toMap(); + } + /** + * Base class for all XenAPI Exceptions + */ + public static class XenAPIException extends IOException { + public final String shortDescription; + public final String[] errorDescription; + XenAPIException(String shortDescription) + { + this.shortDescription = shortDescription; + this.errorDescription = null; + } + XenAPIException(String[] errorDescription) + { + this.errorDescription = errorDescription; + if (errorDescription.length > 0) + { + shortDescription = errorDescription[0]; + } else + { + shortDescription = ""; + } + } + public String toString() + { + if (errorDescription == null) + { + return shortDescription; + } else if (errorDescription.length == 0) + { + return ""; + } + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < errorDescription.length - 1; i++) + { + sb.append(errorDescription[i]); + } + sb.append(errorDescription[errorDescription.length - 1]); + return sb.toString(); + } + } + + /** + * Thrown if the response from the server contains an invalid status. + */ + public static class BadServerResponse extends XenAPIException + { + public BadServerResponse(JsonRpcResponseError responseError) + { + super(String.valueOf(responseError)); + } + } + + /** + * Checks the provided server response was successful. If the call + * failed, throws a XenAPIException. If the server + * returned an invalid response, throws a BadServerResponse. + * Otherwise, returns the server response as passed in. + */ + public static void checkError(JsonRpcResponseError response) throws XenAPIException, BadServerResponse + { + var errorData = response.data; + if(errorData.length == 0){ + throw new BadServerResponse(response); + } + var errorName = response.message; + + {{#errors}} + if (errorName.equals("{{{name}}}")){ + {{#err_params}} + String p{{{index}}} = errorData.length > {{{index}}} ? 
errorData[{{{index}}}] : ""; + {{/err_params}} + throw new Types.{{{class_name}}}({{#err_params}}p{{{index}}}{{^last}}, {{/last}}{{/err_params}}); + } + {{/errors}} + + // An unknown error occurred + throw new Types.XenAPIException(errorData); + } + + {{#enums}} + public enum {{{class_name}}} { + /** + * The value does not belong to this enumeration + */ + @JsonEnumDefaultValue + UNRECOGNIZED, + {{#values}} + /** + * {{{description}}} + */ + @JsonProperty("{{{name}}}") + {{{name_uppercase}}}{{^is_last}},{{/is_last}}{{#is_last}};{{/is_last}} + {{/values}} + + public String toString() { + if (this == UNRECOGNIZED) return "UNRECOGNIZED";{{#values}} + if (this == {{{name_uppercase}}}) return "{{{name}}}";{{/values}} + /* This can never be reached */ + return "UNRECOGNIZED"; + } + } + + {{/enums}} + {{#errors}} + /** + * {{{description}}} + */ + public static class {{{class_name}}} extends XenAPIException { + {{#err_params}} + public final String {{{name}}}; + {{/err_params}} + + /** + * Create a new {{{class_name}}} + */ + public {{{class_name}}}({{#err_params}}String {{{name}}}{{^last}}, {{/last}}{{/err_params}}) { + super("{{{description}}}"); + {{#err_params}} + this.{{{name}}} = {{{name}}}; + {{/err_params}} + } + } + + {{/errors}} + public static class BadAsyncResult extends XenAPIException { + public final String result; + + /** + * Create a new BadAsyncResult + */ + public BadAsyncResult(String result) + { + super(result); + this.result = result; + } + } + + {{#types}} + /** + * Converts an {@link Object} to a {@link {{{name}}}} object. + *
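Putting the two errors sections together: for a hypothetical two-parameter error SOME_ERROR, the checkError body above renders roughly the following Java, shown here inside an OCaml quoted string to match the generator's own idiom:

```ocaml
(* Illustrative rendering; error name and class are hypothetical. *)
let _rendered_check = {|
        if (errorName.equals("SOME_ERROR")){
            String p1 = errorData.length > 1 ? errorData[1] : "";
            String p2 = errorData.length > 2 ? errorData[2] : "";
            throw new Types.SomeError(p1, p2);
        }
|}
```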
+ * This method takes an {@link Object} as input and attempts to convert it into a {@link {{{name}}}} object. + * If the input object is null, the method returns null. Otherwise, it creates a new {@link {{{name}}}} + * object using the input object's {@link String} representation. + *
+ * @param object The {@link Object} to be converted to a {@link {{{name}}}} object. + * @return A {@link {{{name}}}} object created from the input {@link Object}'s {@link String} representation, + * or null if the input object is null. + * @deprecated this method will not be publicly exposed in future releases of this package. + */ + @Deprecated{{#suppress_unchecked_warning}} + @SuppressWarnings("unchecked"){{/suppress_unchecked_warning}} + public static {{{name}}} {{{method_name}}}(Object object) { + if (object == null) { + return null; + } + {{{method_body}}} + } + + {{/types}} + + {{#types}}{{#generate_reference_task_result_func}} + /** + * Attempt to convert the {@link Task}'s result to a {@link {{{name}}}} object. + * Will return null if the method cannot fetch a valid value from the {@link Task} object. + * @param task The task from which to fetch the result. + * @param connection The connection + * @return the instantiated object if a valid value was found, null otherwise. + * @throws BadServerResponse Thrown if the response from the server contains an invalid status. + * @throws XenAPIException if the call failed. + * @throws IOException if an error occurs during a send or receive. This includes cases where a payload is invalid JSON. + */ + public static {{class_name}} to{{class_name}}(Task task, Connection connection) throws IOException { + return Types.to{{class_name}}(parseResult(task.getResult(connection))); + } + + {{/generate_reference_task_result_func}} + {{/types}} + private static String parseResult(String result) throws BadAsyncResult + { + Pattern pattern = Pattern.compile("(.*)"); + Matcher matcher = pattern.matcher(result); + if (!matcher.find() || matcher.groupCount() != 1) { + throw new Types.BadAsyncResult("Can't parse: " + result); + } + + return matcher.group(1); + } + + public static EventBatch toEventBatch(Object object) { + if (object == null) { + return null; + } + Map map = (Map) object; + EventBatch batch = new EventBatch(); + batch.token = toString(map.get("token")); + batch.validRefCounts = map.get("valid_ref_counts"); + batch.events = toSetOfEventRecord(map.get("events")); + return batch; + } +} diff --git a/ocaml/sdk-gen/powershell/autogen/dune b/ocaml/sdk-gen/powershell/autogen/dune index 56eb4c8480a..61e1f86a0a4 100644 --- a/ocaml/sdk-gen/powershell/autogen/dune +++ b/ocaml/sdk-gen/powershell/autogen/dune @@ -13,3 +13,5 @@ (source_tree .) 
) ) + +(data_only_dirs src) diff --git a/ocaml/sdk-gen/powershell/dune b/ocaml/sdk-gen/powershell/dune index 39b2f99b75f..826885af543 100644 --- a/ocaml/sdk-gen/powershell/dune +++ b/ocaml/sdk-gen/powershell/dune @@ -19,3 +19,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/squeezed/dune b/ocaml/squeezed/dune new file mode 100644 index 00000000000..389b982cc01 --- /dev/null +++ b/ocaml/squeezed/dune @@ -0,0 +1 @@ +(data_only_dirs scripts) diff --git a/ocaml/tests/alerts/test_alert_certificate_check.ml b/ocaml/tests/alerts/test_alert_certificate_check.ml index b35dcd362a5..ce84cc672d2 100644 --- a/ocaml/tests/alerts/test_alert_certificate_check.ml +++ b/ocaml/tests/alerts/test_alert_certificate_check.ml @@ -14,7 +14,7 @@ open Certificate_check -let date_of = Xapi_stdext_date.Date.of_string +let date_of = Xapi_stdext_date.Date.of_iso8601 let check_time = date_of "20200201T02:00:00Z" diff --git a/ocaml/tests/alerts/test_daily_license_check.ml b/ocaml/tests/alerts/test_daily_license_check.ml index 866830b1059..067d93288ce 100644 --- a/ocaml/tests/alerts/test_daily_license_check.ml +++ b/ocaml/tests/alerts/test_daily_license_check.ml @@ -37,7 +37,7 @@ let expiry = Alcotest.testable pp_expiry equals let check_time = - Xapi_stdext_date.Date.(to_float (of_string "20160601T04:00:00Z")) + Xapi_stdext_date.Date.(to_unix_time (of_iso8601 "20160601T04:00:00Z")) let test_expiry ((pool_license_state, all_license_params), expected) () = let result = check_license check_time pool_license_state all_license_params in diff --git a/ocaml/tests/alerts/test_expiry_alert.ml b/ocaml/tests/alerts/test_expiry_alert.ml index ece31122b78..2c3fa283be8 100644 --- a/ocaml/tests/alerts/test_expiry_alert.ml +++ b/ocaml/tests/alerts/test_expiry_alert.ml @@ -14,7 +14,7 @@ open Expiry_alert -let date_of = Xapi_stdext_date.Date.of_string +let date_of = Xapi_stdext_date.Date.of_iso8601 let test_expired = ("TEST_EXPIRED", 1L) diff --git a/ocaml/tests/bench/bechamel_simple_cli.ml b/ocaml/tests/bench/bechamel_simple_cli.ml new file mode 100644 index 00000000000..e40399cf04d --- /dev/null +++ b/ocaml/tests/bench/bechamel_simple_cli.ml @@ -0,0 +1,153 @@ +open Bechamel +open Toolkit + +(* Bechamel doesn't provide before/after hooks, just allocate/free, but those are done outside the place where + Bechamel checks for GC live words stabilization. +*) +let before_after ~before ~after ~get ~label ~unit = + let shared_state = Atomic.make None and called = Atomic.make 0 in + let module BeforeAfter = struct + type witness = int Atomic.t + + let make () = Atomic.make 0 + + let load t = Atomic.set t 0 + + let unload _ = () + + let label _ = label + + let unit _ = unit + + let get _ = + (* + We get added to the instances both at the beginning and the end, so we get called 4 times: + + get () - 0: None -> state := before () + time () + get () - 1 + + benchmark_loop () + + get () - 2 + time () + get () - 3, after state, state := None + + We want the time measurement to be as close to the benchmark loop as possible, + so we perform operations only on call 1 and 4 + *) + let phase = Atomic.fetch_and_add called 1 mod 4 in + let old = Atomic.get shared_state in + match (old, phase) with + | None, 0 -> + before () |> Option.some |> Atomic.set shared_state ; + 0. 
+ | Some state, (1 | 2) -> + get state + | Some state, 3 -> + let r = get state in + Atomic.set shared_state None ; + after state ; + r + | None, _ -> + assert false + | Some _, _ -> + assert false + end in + let measure = Measure.register (module BeforeAfter) in + Measure.instance (module BeforeAfter) measure + +let skip_label = "workload" + +let thread_workload ~before ~run ~after = + let before () = + let state = before () + and stop = Atomic.make false + and loops = Atomic.make 0 in + let thread_worker () = + while not (Atomic.get stop) do + Sys.opaque_identity (run state : unit) ; + Atomic.incr loops + done + in + let t = Thread.create thread_worker () in + (state, stop, loops, t) + and after (state, stop, _loops, worker) = + Atomic.set stop true ; Thread.join worker ; after state + and get (_, _, loops, _) = Atomic.fetch_and_add loops 1 |> float_of_int in + before_after ~before ~after ~get ~label:skip_label ~unit:"loops" + +(* based on bechamel example code *) + +(* For very short benchmarks ensure that they get to run long enough to switch threads + a few times. + Bechamel has both an iteration count and time limit, so this won't be a problem for slower benchmarks. +*) +let limit = 10_000_000 + +let benchmark ~instances tests = + let cfg = Benchmark.cfg ~limit ~quota:(Time.second 10.0) () in + Benchmark.all cfg instances tests + +let analyze ~instances raw_results = + let ols ~bootstrap = + Analyze.ols ~bootstrap ~r_square:true ~predictors:[|Measure.run|] + in + let results = + List.map + (fun instance -> + let f bootstrap = Analyze.all (ols ~bootstrap) instance raw_results in + try f 3000 with _ -> f 0 + ) + instances + in + (Analyze.merge (ols ~bootstrap:3000) instances results, raw_results) + +open Notty_unix + +let img (window, results) = + Bechamel_notty.Multiple.image_of_ols_results ~rect:window + ~predictor:Measure.run results + |> eol + +let not_workload measure = not (Measure.label measure = skip_label) + +let run_and_print instances tests = + let results, _ = + tests + |> benchmark ~instances + |> analyze ~instances:(List.filter not_workload instances) + in + let window = + match winsize Unix.stdout with + | Some (w, h) -> + {Bechamel_notty.w; h} + | None -> + {Bechamel_notty.w= 80; h= 1} + in + img (window, results) |> eol |> output_image ; + results + |> Hashtbl.iter @@ fun label results -> + if label = Measure.label Instance.monotonic_clock then + let units = Bechamel_notty.Unit.unit_of_label label in + results + |> Hashtbl.iter @@ fun name ols -> + Format.printf "%s (%s):@, %a@." name units Analyze.OLS.pp ols + +let cli ?(always = []) ?(workloads = []) tests = + let instances = + always + @ Instance.[monotonic_clock; minor_allocated; major_allocated] + @ always + in + List.iter (fun i -> Bechamel_notty.Unit.add i (Measure.unit i)) instances ; + Format.printf "@,Running benchmarks (no workloads)@." ; + run_and_print instances tests ; + + if workloads <> [] then ( + Format.printf "@,Running benchmarks (workloads)@." 
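A hedged usage sketch for this harness: benchmarks are ordinary Bechamel tests, and cli registers the measurement instances on both sides of the list so that the four-phase before_after measure above sees one call at the start and one at the end of each run. The test name and body here are hypothetical:

```ocaml
(* Minimal sketch of driving the harness from a bench executable. *)
let () =
  let open Bechamel in
  let test =
    Test.make ~name:"list_rev"
      (Staged.stage (fun () -> Sys.opaque_identity (List.rev [1; 2; 3])))
  in
  Bechamel_simple_cli.cli (Test.make_grouped ~name:"demo" [test])
```

The same symmetry explains the workloads handling: workloads are prepended and appended around the instances so their worker threads are running for the whole measured window.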
; + List.iter (fun i -> Bechamel_notty.Unit.add i (Measure.unit i)) workloads ; + (* workloads come first, so that we unpause them in time *) + let instances = workloads @ instances @ workloads in + run_and_print instances tests + ) diff --git a/ocaml/tests/bench/bench_tracing.ml b/ocaml/tests/bench/bench_tracing.ml new file mode 100644 index 00000000000..eebe6e6aef2 --- /dev/null +++ b/ocaml/tests/bench/bench_tracing.ml @@ -0,0 +1,87 @@ +open Bechamel + +let ( let@ ) f x = f x + +(* TODO: before *) + +let trace_test_inner span = + let@ span = + Tracing.with_child_trace + ~attributes:[("foo", "testing")] + span ~name:__FUNCTION__ + in + let@ _ = + Tracing.with_child_trace ~attributes:[("bar", "val")] span ~name:"test" + in + Sys.opaque_identity ignore () + +let trace_test_span _ = Tracing.with_tracing ~name:__FUNCTION__ trace_test_inner + +let trace_test_off _ = trace_test_inner None + +let uuid = "TEST" + +let export_thread = + (* need to ensure this isn't running outside the benchmarked section, + or bechamel might fail with 'Failed to stabilize GC' + *) + let after _ = Tracing_export.flush_and_exit () in + Bechamel_simple_cli.thread_workload ~before:Tracing_export.main ~after + ~run:ignore + +let workload1 = + Bechamel_simple_cli.thread_workload ~before:ignore ~after:ignore + ~run:trace_test_span + +let create_gc_work = + let a = Array.make 1_000 "" in + fun () -> + (* create work for the GC by continously creating a lot of short lived strings *) + Sys.opaque_identity (Array.iteri (fun i _ -> a.(i) <- String.make 2 'x') a) + +let workload2 = + Bechamel_simple_cli.thread_workload ~before:ignore ~after:ignore + ~run:create_gc_work + +let workloads = [workload1; workload2] + +let allocate () = + Tracing.TracerProvider.create ~enabled:true ~attributes:[] ~endpoints:[] + ~name_label:__MODULE__ ~uuid ; + Tracing_export.main () + +let free t = + Tracing.TracerProvider.destroy ~uuid ; + Tracing_export.flush_and_exit () ; + Thread.join t + +let test_tracing_on ?(overflow = false) ~name f = + let allocate () = + if overflow then ( + Tracing.Spans.set_max_spans 10 ; + Tracing.Spans.set_max_traces 10 + ) ; + allocate () + and free t = + if overflow then ( + Tracing.Spans.set_max_spans Bechamel_simple_cli.limit ; + Tracing.Spans.set_max_traces Bechamel_simple_cli.limit + ) ; + free t + in + Test.make_with_resource ~name ~allocate ~free Test.uniq f + +let benchmarks = + Tracing.Spans.set_max_spans Bechamel_simple_cli.limit ; + Tracing.Spans.set_max_traces Bechamel_simple_cli.limit ; + Test.make_grouped ~name:"tracing" + [ + Test.make ~name:"overhead(off)" (Staged.stage trace_test_off) + ; test_tracing_on ~name:"overhead(on, no span)" (Staged.stage trace_test_off) + ; test_tracing_on ~name:"overhead(on, create span)" + (Staged.stage trace_test_span) + ; test_tracing_on ~overflow:true ~name:"max span overflow" + (Staged.stage trace_test_span) + ] + +let () = Bechamel_simple_cli.cli ~always:[export_thread] ~workloads benchmarks diff --git a/ocaml/tests/bench/dune b/ocaml/tests/bench/dune new file mode 100644 index 00000000000..0d11700e285 --- /dev/null +++ b/ocaml/tests/bench/dune @@ -0,0 +1,4 @@ +(executable + (name bench_tracing) + (libraries tracing bechamel bechamel-notty notty.unix tracing_export threads.posix fmt notty) +) diff --git a/ocaml/tests/common/test_common.ml b/ocaml/tests/common/test_common.ml index 1c1685f693d..293317518a4 100644 --- a/ocaml/tests/common/test_common.ml +++ b/ocaml/tests/common/test_common.ml @@ -296,7 +296,7 @@ let make_pool ~__context ~master ?(name_label = "") 
?(name_description = "") ?(repository_proxy_username = "") ?(repository_proxy_password = Ref.null) ?(migration_compression = false) ?(coordinator_bias = true) ?(telemetry_uuid = Ref.null) ?(telemetry_frequency = `weekly) - ?(telemetry_next_collection = API.Date.never) + ?(telemetry_next_collection = API.Date.epoch) ?(last_update_sync = API.Date.epoch) ?(update_sync_frequency = `daily) ?(update_sync_day = 0L) ?(update_sync_enabled = false) ?(recommendations = []) () = @@ -393,7 +393,7 @@ let make_vdi ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ()) ?(read_only = false) ?(other_config = []) ?(storage_lock = false) ?(location = "") ?(managed = false) ?(missing = false) ?(parent = Ref.null) ?(xenstore_data = []) ?(sm_config = []) ?(is_a_snapshot = false) - ?(snapshot_of = Ref.null) ?(snapshot_time = API.Date.never) ?(tags = []) + ?(snapshot_of = Ref.null) ?(snapshot_time = API.Date.epoch) ?(tags = []) ?(allow_caching = true) ?(on_boot = `persist) ?(metadata_of_pool = Ref.make ()) ?(metadata_latest = true) ?(is_tools_iso = false) ?(cbt_enabled = false) () = @@ -516,9 +516,9 @@ let make_pool_update ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ()) let make_session ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ()) ?(this_host = Ref.null) ?(this_user = Ref.null) - ?(last_active = API.Date.never) ?(pool = false) ?(other_config = []) + ?(last_active = API.Date.epoch) ?(pool = false) ?(other_config = []) ?(is_local_superuser = false) ?(subject = Ref.null) - ?(validation_time = API.Date.never) ?(auth_user_sid = "") + ?(validation_time = API.Date.epoch) ?(auth_user_sid = "") ?(auth_user_name = "") ?(rbac_permissions = []) ?(parent = Ref.null) ?(originator = "test") ?(client_certificate = false) () = Db.Session.create ~__context ~ref ~uuid ~this_host ~this_user ~last_active diff --git a/ocaml/tests/dune b/ocaml/tests/dune index 7d351d5e45c..fe620e8fc7f 100644 --- a/ocaml/tests/dune +++ b/ocaml/tests/dune @@ -122,7 +122,7 @@ xml-light2 yojson ) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv)) + (preprocess (per_module ((pps ppx_deriving_rpc) Test_cluster_host))) ) (test (name test_storage_smapiv1_wrapper) @@ -168,3 +168,5 @@ ) (env (_ (env-vars (XAPI_TEST 1)))) + +(data_only_dirs test_data tests) diff --git a/ocaml/tests/has_vendor_device_test.py b/ocaml/tests/has_vendor_device_test.py deleted file mode 100644 index 5d5ceaf542d..00000000000 --- a/ocaml/tests/has_vendor_device_test.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -import xmlrpclib -import sys - -s=xmlrpclib.Server("http://localhost/") -sess=s.session.login_with_password("root","xenroot")['Value'] - -pool = s.pool.get_all(sess)['Value'][0] -restrictions = s.pool.get_restrictions(sess,pool)['Value'] - -base_request = {'user_version':'1', 'is_a_template':False, 'affinity':'', 'memory_static_max':'4', 'memory_static_min':'1', 'memory_dynamic_max':'3', 'memory_dynamic_min':'2', 'VCPUs_params':{}, 'VCPUs_max':'1', 'VCPUs_at_startup':'1', 'name_label':'hello', 'name_description':'hi', 'memory_target':'2', 'actions_after_shutdown':'destroy', 'actions_after_reboot':'restart', 'actions_after_crash':'destroy', 'PV_bootloader':'', 'PV_kernel':'', 'PV_ramdisk':'', 'PV_args':'', 'PV_bootloader_args':'', 'PV_legacy_args':'', 'HVM_boot_policy':'', 'HVM_boot_params':{}, 'HVM_shadow_multiplier':1.0, 'platform':{}, 'PCI_bus':'', 'other_config':{}, 'recommendations':'', 'xenstore_data':{}, 'ha_always_run':False, 'ha_restart_priority':'1', 'tags':[], 'blocked_operations':{}, 
'protection_policy':'', 'is_snapshot_from_vmpp':False, 'appliance':'', 'start_delay':'0', 'shutdown_delay':'0', 'order':'0', 'suspend_SR':'', 'version':'0', 'generation_id':'', 'hardware_platform_version':'0'} - -# - -def create(): - res = s.VM.create(sess, base_request) - return res - -def create_with_vd(b): - request = base_request.copy() - request['has_vendor_device']=b - return s.VM.create(sess,request) - -# VD in request | OK by license | pool.policy_no_vendor_device | resulting VM.has_vendor_device -# - | False | False | False -# False | False | False | False -# True | False | False | Failure -# - | False | True | False -# False | False | True | False -# True | False | True | Failure - - -def test_with_restriction(): # OK by license column above - # Expect this to be successful on an unlicensed host, and for the field to be 'false' - print("running restricted tests (license says you're not allowed the vendor device)") - - s.pool.set_policy_no_vendor_device(sess,pool,False) - -# - | False | False | False - res = create() - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | False | False | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | False | False | Failure - res = create_with_vd(True) - print("Expecting failure: got %s" % res['Status']) - assert(res['Status']=='Failure') - - s.pool.set_policy_no_vendor_device(sess,pool,True) - -# - | False | True | False - res = create() - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | False | True | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | False | True | Failure - res = create_with_vd(True) - print("Expecting failure: got %s" % res['Status']) - assert(res['Status']=='Failure') - - - -def test_no_restriction(): - print("running unrestricted tests") - -# - | True | False | True -# False | True | False | False -# True | True | False | True -# - | True | True | False -# False | True | True | False -# True | True | True | True - - s.pool.set_policy_no_vendor_device(sess,pool,False) - -# - | True | False | True - res = create() - vm = res['Value'] - expected = True - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | True | False | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | True | False | True - res = create_with_vd(True) - vm = res['Value'] - expected = True - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - - s.pool.set_policy_no_vendor_device(sess,pool,True) - -# - | True | True | False - res = create() - vm = res['Value'] - 
expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | True | True | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | True | True | True - res = create_with_vd(True) - vm = res['Value'] - expected = True - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - - - -if restrictions['restrict_pci_device_for_auto_update'] == "true": - test_with_restriction() -else: - test_no_restriction() - - - - - diff --git a/ocaml/tests/record_util/old_enum_all.ml b/ocaml/tests/record_util/old_enum_all.ml index 8c5b422365c..f58cdc7542f 100644 --- a/ocaml/tests/record_util/old_enum_all.ml +++ b/ocaml/tests/record_util/old_enum_all.ml @@ -1,3 +1,7 @@ +let all_placement_policy = [`anti_affinity; `normal] + +let all_origin = [`remote; `bundle] + let all_certificate_type = [`ca; `host; `host_internal] let all_cluster_host_operation = [`enable; `disable; `destroy] @@ -22,7 +26,7 @@ let all_vgpu_type_implementation = let all_allocation_algorithm = [`breadth_first; `depth_first] -let all_pgpu_dom0_access = +let all_pci_dom0_access = [`enabled; `disable_on_reboot; `disabled; `enable_on_reboot] let all_sriov_configuration_mode = [`sysfs; `modprobe; `manual; `unknown] @@ -135,16 +139,19 @@ let all_host_numa_affinity_policy = [`any; `best_effort; `default_policy] let all_host_sched_gran = [`core; `cpu; `socket] -let all_latest_synced_updates_applied_state = [`yes; `no; `unknown] - let all_update_guidances = [ `reboot_host ; `reboot_host_on_livepatch_failure + ; `reboot_host_on_kernel_livepatch_failure + ; `reboot_host_on_xen_livepatch_failure ; `restart_toolstack ; `restart_device_model + ; `restart_vm ] +let all_latest_synced_updates_applied_state = [`yes; `no; `unknown] + let all_host_display = [`enabled; `disable_on_reboot; `disabled; `enable_on_reboot] @@ -159,6 +166,7 @@ let all_host_allowed_operations = ; `vm_resume ; `vm_migrate ; `apply_updates + ; `enable ] let all_vm_appliance_operation = @@ -180,6 +188,8 @@ let all_tristate_type = [`yes; `no; `unspecified] let all_domain_type = [`hvm; `pv; `pv_in_pvh; `pvh; `unspecified] +let all_vm_uefi_mode = [`setup; `user] + let all_on_crash_behaviour = [ `destroy @@ -267,6 +277,7 @@ let all_pool_allowed_operations = ; `designate_new_master ; `configure_repositories ; `sync_updates + ; `sync_bundle ; `get_updates ; `apply_updates ; `tls_verification_enable @@ -274,6 +285,7 @@ let all_pool_allowed_operations = ; `exchange_certificates_on_join ; `exchange_ca_certificates_on_join ; `copy_primary_host_certs + ; `eject ] let all_task_status_type = @@ -283,9 +295,22 @@ let all_task_allowed_operations = [`cancel; `destroy] let all_hello_return = [`ok; `unknown_host; `cannot_talk_back] +let all_pool_guest_secureboot_readiness = [`ready; `ready_no_dbx; `not_ready] + let all_livepatch_status = [`ok_livepatch_complete; `ok_livepatch_incomplete; `ok] -let all_sr_health = [`healthy; `recovering] +let all_vm_secureboot_readiness = + [ + `not_supported + ; `disabled + ; `first_boot + ; `ready + ; `ready_no_dbx + ; `setup_mode + ; `certs_incomplete + ] + +let all_sr_health = [`healthy; `recovering; `unreachable; `unavailable] let all_event_operation = [`add; 
`del; `_mod] diff --git a/ocaml/tests/record_util/old_record_util.ml b/ocaml/tests/record_util/old_record_util.ml index ad38fe7ea37..c854f27f5aa 100644 --- a/ocaml/tests/record_util/old_record_util.ml +++ b/ocaml/tests/record_util/old_record_util.ml @@ -15,6 +15,9 @@ exception Record_failure of string +let record_failure fmt = + Printf.ksprintf (fun msg -> raise (Record_failure msg)) fmt + let to_str = function Rpc.String x -> x | _ -> failwith "Invalid" let certificate_type_to_string = function @@ -69,7 +72,7 @@ let string_to_class str = | "Certificate" -> `Certificate | _ -> - failwith "Bad type" + record_failure "Bad type" let power_state_to_string state = match state with @@ -151,6 +154,38 @@ let string_to_vm_operation x = else List.assoc x table +let vm_uefi_mode_of_string = function + | "setup" -> + `setup + | "user" -> + `user + | s -> + record_failure "Expected 'user','setup', got %s" s + +let vm_secureboot_readiness_to_string = function + | `not_supported -> + "not_supported" + | `disabled -> + "disabled" + | `first_boot -> + "first_boot" + | `ready -> + "ready" + | `ready_no_dbx -> + "ready_no_dbx" + | `setup_mode -> + "setup_mode" + | `certs_incomplete -> + "certs_incomplete" + +let pool_guest_secureboot_readiness_to_string = function + | `ready -> + "ready" + | `ready_no_dbx -> + "ready_no_dbx" + | `not_ready -> + "not_ready" + let pool_operation_to_string = function | `ha_enable -> "ha_enable" @@ -166,6 +201,8 @@ let pool_operation_to_string = function "configure_repositories" | `sync_updates -> "sync_updates" + | `sync_bundle -> + "sync_bundle" | `get_updates -> "get_updates" | `apply_updates -> @@ -178,6 +215,8 @@ let pool_operation_to_string = function "exchange_ca_certificates_on_join" | `copy_primary_host_certs -> "copy_primary_host_certs" + | `eject -> + "eject" let host_operation_to_string = function | `provision -> @@ -198,16 +237,24 @@ let host_operation_to_string = function "VM.migrate" | `apply_updates -> "apply_updates" + | `enable -> + "enable" let update_guidance_to_string = function | `reboot_host -> "reboot_host" | `reboot_host_on_livepatch_failure -> "reboot_host_on_livepatch_failure" + | `reboot_host_on_kernel_livepatch_failure -> + "reboot_host_on_kernel_livepatch_failure" + | `reboot_host_on_xen_livepatch_failure -> + "reboot_host_on_xen_livepatch_failure" | `restart_toolstack -> "restart_toolstack" | `restart_device_model -> "restart_device_model" + | `restart_vm -> + "restart_vm" let latest_synced_updates_applied_state_to_string = function | `yes -> @@ -343,12 +390,8 @@ let string_to_vif_locking_mode = function | "disabled" -> `disabled | s -> - raise - (Record_failure - ("Expected 'network_default', 'locked', 'unlocked', 'disabled', got " - ^ s - ) - ) + record_failure + "Expected 'network_default', 'locked', 'unlocked', 'disabled', got %s" s let vmss_type_to_string = function | `snapshot -> @@ -366,12 +409,8 @@ let string_to_vmss_type = function | "snapshot_with_quiesce" -> `snapshot_with_quiesce | s -> - raise - (Record_failure - ("Expected 'snapshot', 'checkpoint', 'snapshot_with_quiesce', got " - ^ s - ) - ) + record_failure + "Expected 'snapshot', 'checkpoint', 'snapshot_with_quiesce', got %s" s let vmss_frequency_to_string = function | `hourly -> @@ -389,7 +428,7 @@ let string_to_vmss_frequency = function | "weekly" -> `weekly | s -> - raise (Record_failure ("Expected 'hourly', 'daily', 'weekly', got " ^ s)) + record_failure "Expected 'hourly', 'daily', 'weekly', got %s" s let network_default_locking_mode_to_string = function | `unlocked -> @@ 
-403,7 +442,7 @@ let string_to_network_default_locking_mode = function | "disabled" -> `disabled | s -> - raise (Record_failure ("Expected 'unlocked' or 'disabled', got " ^ s)) + record_failure "Expected 'unlocked' or 'disabled', got %s" s let network_purpose_to_string : API.network_purpose -> string = function | `nbd -> @@ -417,7 +456,7 @@ let string_to_network_purpose : string -> API.network_purpose = function | "insecure_nbd" -> `insecure_nbd | s -> - raise (Record_failure ("Expected a network purpose string; got " ^ s)) + record_failure "Expected a network purpose string; got %s" s let vm_appliance_operation_to_string = function | `start -> @@ -605,7 +644,7 @@ let string_to_on_normal_exit s = | "restart" -> `restart | _ -> - raise (Record_failure ("Expected 'destroy' or 'restart', got " ^ s)) + record_failure "Expected 'destroy' or 'restart', got %s" s let on_crash_behaviour_to_string x = match x with @@ -637,14 +676,11 @@ let string_to_on_crash_behaviour s = | "rename_restart" -> `rename_restart | _ -> - raise - (Record_failure - ("Expected 'destroy', 'coredump_and_destroy'," - ^ "'restart', 'coredump_and_restart', 'preserve' or \ - 'rename_restart', got " - ^ s - ) - ) + record_failure + "Expected 'destroy', 'coredump_and_destroy', \ + 'restart','coredump_and_restart', 'preserve' or 'rename_restart', got \ + %s" + s let on_softreboot_behaviour_to_string x = match x with @@ -668,14 +704,11 @@ let string_to_on_softreboot_behaviour s = | "soft_reboot" -> `soft_reboot | _ -> - raise - (Record_failure - ("Expected 'destroy', 'coredump_and_destroy'," - ^ "'restart', 'coredump_and_restart', 'preserve', 'soft_reboot' or \ - 'rename_restart', got " - ^ s - ) - ) + record_failure + "Expected 'destroy', 'coredump_and_destroy', 'restart', \ + 'coredump_and_restart', 'preserve', 'soft_reboot' or \ + 'rename_restart', got %s" + s let host_display_to_string h = match h with @@ -697,7 +730,7 @@ let host_sched_gran_of_string s = | "socket" -> `socket | _ -> - raise (Record_failure ("Expected 'core','cpu', 'socket', got " ^ s)) + record_failure "Expected 'core','cpu', 'socket', got %s" s let host_sched_gran_to_string = function | `core -> @@ -724,10 +757,8 @@ let host_numa_affinity_policy_of_string a = | "default_policy" -> `default_policy | s -> - raise - (Record_failure - ("Expected 'any', 'best_effort' or 'default_policy', got " ^ s) - ) + record_failure "Expected 'any', 'best_effort' or 'default_policy', got %s" + s let pci_dom0_access_to_string x = host_display_to_string x @@ -738,7 +769,7 @@ let string_to_vdi_onboot s = | "reset" -> `reset | _ -> - raise (Record_failure ("Expected 'persist' or 'reset', got " ^ s)) + record_failure "Expected 'persist' or 'reset', got %s" s let string_to_vbd_mode s = match String.lowercase_ascii s with @@ -747,7 +778,7 @@ let string_to_vbd_mode s = | "rw" -> `RW | _ -> - raise (Record_failure ("Expected 'RO' or 'RW', got " ^ s)) + record_failure "Expected 'RO' or 'RW', got %s" s let vbd_mode_to_string = function `RO -> "ro" | `RW -> "rw" @@ -760,7 +791,7 @@ let string_to_vbd_type s = | "floppy" -> `Floppy | _ -> - raise (Record_failure ("Expected 'CD' or 'Disk', got " ^ s)) + record_failure "Expected 'CD' or 'Disk', got %s" s let power_to_string h = match h with @@ -819,7 +850,7 @@ let ip_configuration_mode_of_string m = | "static" -> `Static | s -> - raise (Record_failure ("Expected 'dhcp','none' or 'static', got " ^ s)) + record_failure "Expected 'dhcp','none' or 'static', got %s" s let vif_ipv4_configuration_mode_to_string = function | `None -> @@ -834,7 +865,7 
@@ let vif_ipv4_configuration_mode_of_string m = | "static" -> `Static | s -> - raise (Record_failure ("Expected 'none' or 'static', got " ^ s)) + record_failure "Expected 'none' or 'static', got %s" s let ipv6_configuration_mode_to_string = function | `None -> @@ -857,10 +888,7 @@ let ipv6_configuration_mode_of_string m = | "autoconf" -> `Autoconf | s -> - raise - (Record_failure - ("Expected 'dhcp','none' 'autoconf' or 'static', got " ^ s) - ) + record_failure "Expected 'dhcp','none' 'autoconf' or 'static', got %s" s let vif_ipv6_configuration_mode_to_string = function | `None -> @@ -875,7 +903,7 @@ let vif_ipv6_configuration_mode_of_string m = | "static" -> `Static | s -> - raise (Record_failure ("Expected 'none' or 'static', got " ^ s)) + record_failure "Expected 'none' or 'static', got %s" s let primary_address_type_to_string = function | `IPv4 -> @@ -890,7 +918,7 @@ let primary_address_type_of_string m = | "ipv6" -> `IPv6 | s -> - raise (Record_failure ("Expected 'ipv4' or 'ipv6', got " ^ s)) + record_failure "Expected 'ipv4' or 'ipv6', got %s" s let bond_mode_to_string = function | `balanceslb -> @@ -909,7 +937,7 @@ let bond_mode_of_string m = | "lacp" -> `lacp | s -> - raise (Record_failure ("Invalid bond mode. Got " ^ s)) + record_failure "Invalid bond mode. Got %s" s let allocation_algorithm_to_string = function | `depth_first -> @@ -924,7 +952,7 @@ let allocation_algorithm_of_string a = | "breadth-first" -> `breadth_first | s -> - raise (Record_failure ("Invalid allocation algorithm. Got " ^ s)) + record_failure "Invalid allocation algorithm. Got %s" s let pvs_proxy_status_to_string = function | `stopped -> @@ -945,12 +973,13 @@ let cluster_host_operation_to_string op = let bool_of_string s = match String.lowercase_ascii s with - | "true" | "yes" -> + | "true" | "t" | "yes" | "y" | "1" -> true - | "false" | "no" -> + | "false" | "f" | "no" | "n" | "0" -> false | _ -> - raise (Record_failure ("Expected 'true','yes','false','no', got " ^ s)) + record_failure + "Expected 'true','t','yes','y','1','false','f','no','n','0' got %s" s let sdn_protocol_of_string s = match String.lowercase_ascii s with @@ -959,7 +988,7 @@ let sdn_protocol_of_string s = | "pssl" -> `pssl | _ -> - raise (Record_failure ("Expected 'ssl','pssl', got " ^ s)) + record_failure "Expected 'ssl','pssl', got %s" s let sdn_protocol_to_string = function `ssl -> "ssl" | `pssl -> "pssl" @@ -970,7 +999,7 @@ let tunnel_protocol_of_string s = | "vxlan" -> `vxlan | _ -> - raise (Record_failure ("Expected 'gre','vxlan', got " ^ s)) + record_failure "Expected 'gre','vxlan', got %s" s let tunnel_protocol_to_string = function `gre -> "gre" | `vxlan -> "vxlan" @@ -1000,14 +1029,6 @@ let network_sriov_configuration_mode_to_string = function | `unknown -> "unknown" -(* string_to_string_map_to_string *) -let s2sm_to_string sep x = - String.concat sep (List.map (fun (a, b) -> a ^ ": " ^ b) x) - -(* string to blob ref map to string *) -let s2brm_to_string get_uuid_from_ref sep x = - String.concat sep (List.map (fun (n, r) -> n ^ ": " ^ get_uuid_from_ref r) x) - let on_boot_to_string onboot = match onboot with `reset -> "reset" | `persist -> "persist" @@ -1043,119 +1064,42 @@ let domain_type_of_string x = | "pvh" -> `pvh | s -> - raise (Record_failure ("Invalid domain type. Got " ^ s)) + record_failure "Invalid domain type. 
Got %s" s let vtpm_operation_to_string (op : API.vtpm_operations) = match op with `destroy -> "destroy" -(** Parse a string which might have a units suffix on the end *) -let bytes_of_string field x = +(** parse [0-9]*(b|bytes|kib|mib|gib|tib)* to bytes *) +let bytes_of_string str = let ( ** ) a b = Int64.mul a b in - let max_size_TiB = - Int64.div Int64.max_int (1024L ** 1024L ** 1024L ** 1024L) - in - (* detect big number that cannot be represented by Int64. *) - let int64_of_string s = - try Int64.of_string s - with _ -> - if s = "" then - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer (possibly \ - with suffix)" - field - ) - ) ; - let alldigit = ref true and i = ref (String.length s - 1) in - while !alldigit && !i > 0 do - alldigit := Astring.Char.Ascii.is_digit s.[!i] ; - decr i - done ; - if !alldigit then - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': number too big (maximum = %Ld TiB)" - field max_size_TiB - ) - ) - else - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer (possibly \ - with suffix)" - field - ) - ) - in - match - Astring.( - String.fields ~empty:false ~is_sep:(fun c -> - Char.Ascii.(is_white c || is_digit c) - ) - ) - x - with - | [] -> - (* no suffix on the end *) - int64_of_string x - | [suffix] -> - let number = - match - Astring.( - String.fields ~empty:false ~is_sep:(Fun.negate Char.Ascii.is_digit) - ) - x - with - | [number] -> - int64_of_string number - | _ -> - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer \ - (possibly with suffix)" - field - ) - ) - in - let multiplier = - match suffix with - | "bytes" -> - 1L - | "KiB" -> - 1024L - | "MiB" -> - 1024L ** 1024L - | "GiB" -> - 1024L ** 1024L ** 1024L - | "TiB" -> - 1024L ** 1024L ** 1024L ** 1024L - | x -> - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': Unknown suffix: '%s' (try \ - KiB, MiB, GiB or TiB)" - field x - ) - ) - in - (* FIXME: detect overflow *) - number ** multiplier - | _ -> - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer (possibly with \ - suffix)" - field - ) - ) + let invalid msg = raise (Invalid_argument msg) in + try + Scanf.sscanf str "%Ld %s" @@ fun size suffix -> + match String.lowercase_ascii suffix with + | _ when size < 0L -> + invalid str + | "bytes" | "b" | "" -> + size + | "kib" | "kb" | "k" -> + size ** 1024L + | "mib" | "mb" | "m" -> + size ** 1024L ** 1024L + | "gib" | "gb" | "g" -> + size ** 1024L ** 1024L ** 1024L + | "tib" | "tb" | "t" -> + size ** 1024L ** 1024L ** 1024L ** 1024L + | _ -> + invalid suffix + with _ -> invalid str -(* Vincent's random mac utils *) +(** Parse a string which might have a units suffix on the end *) +let bytes_of_string field x = + try bytes_of_string x + with Invalid_argument _ -> + record_failure + "Failed to parse field '%s': expecting an integer (possibly with suffix \ + KiB, MiB, GiB, TiB), got '%s'" + field x let mac_from_int_array macs = (* make sure bit 1 (local) is set and bit 0 (unicast) is clear *) @@ -1179,4 +1123,21 @@ let update_sync_frequency_of_string s = | "weekly" -> `weekly | _ -> - raise (Record_failure ("Expected 'daily', 'weekly', got " ^ s)) + record_failure "Expected 'daily', 'weekly', got %s" s + +let vm_placement_policy_to_string = function + | `normal -> + "normal" + | `anti_affinity -> + "anti-affinity" + +let vm_placement_policy_of_string a = + match String.lowercase_ascii a 
with + | "normal" -> + `normal + | "anti-affinity" -> + `anti_affinity + | s -> + record_failure "Invalid VM placement policy, got %s" s + +let repo_origin_to_string = function `remote -> "remote" | `bundle -> "bundle" diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 3ed5c2d7351..c3c54f326e8 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -40,22 +40,37 @@ let exn_to_string_strip e = *) e |> Printexc.to_string |> drop_module_prefix |> drop_exn_arguments -let exn_equal_strip a b = - String.equal (exn_to_string_strip a) (exn_to_string_strip b) - -let exn = V1.testable (Fmt.of_to_string exn_to_string_strip) exn_equal_strip - let test_of_string ~name all_enum old_to_string of_string_opt = + let exn_equal_strip a b = + String.equal (exn_to_string_strip a) (exn_to_string_strip b) + in + (* New function is allowed to be more lenient and accept more cases *) + let custom_eq expected actual = + match (expected, actual) with + | Error _, Ok _ -> + true + | Error a, Error b -> + exn_equal_strip a b + | a, b -> + a = b + in of_string_opt |> Option.map (fun (old_of_string, new_of_string) -> let make input = V1.test_case input `Quick @@ fun () -> let expected = wrap old_of_string input in let actual = wrap new_of_string input in - let pp_enum = Fmt.of_to_string old_to_string in + let pp_enum_result = + Fmt.of_to_string (function + | Ok a -> + old_to_string a + | Error b -> + exn_to_string_strip b + ) + in V1.( check' ~msg:"compatible" ~expected ~actual - @@ result (testable pp_enum ( = )) exn + @@ testable pp_enum_result custom_eq ) in ( name ^ "of_string" @@ -81,7 +96,15 @@ let mk line of_string_opt all_enum (old_to_string, new_to_string) = (* Created by: ``` -grep 'let.*to_string' old_record_util.ml | sed -re 's/^let ([^ ]+)_to_string.*/\1/' | while read ENUM; do if grep "${ENUM}_of_string" old_record_util.ml >/dev/null; then echo "; mk __LINE__ (Some (O.${ENUM}_of_string, N.${ENUM}_of_string)) all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; else echo "; mk __LINE__ None all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; fi; done +grep 'let.*to_string' old_record_util.ml | \ +sed -re 's/^let ([^ ]+)_to_string.*/\1/' | \ +while read ENUM; do + if grep "${ENUM}_of_string" old_record_util.ml >/dev/null; then + echo "; mk __LINE__ (Some (O.${ENUM}_of_string, N.${ENUM}_of_string)) all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; + else + echo "; mk __LINE__ None all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; + fi; +done ``` and then tweaked to compile using LSP hints where the names were not consistent (e.g. singular vs plural, etc.) 
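As a minimal illustration of what each generated case compares (a sketch only, reusing the `Record_failure` exception and the `record_failure` helper defined in old_record_util.ml above; `old_style`/`new_style` are hypothetical names, not part of the generated list):
```
(* Old style: hand-rolled string concatenation. *)
let old_style s =
  raise (Record_failure ("Expected 'daily', 'weekly', got " ^ s))

(* New style: the printf-like helper; raises the same exception
   with the same message. *)
let new_style s = record_failure "Expected 'daily', 'weekly', got %s" s
```
Each generated `mk` entry checks that the old and new conversion functions agree on every enum value, and `custom_eq` above additionally tolerates the new function accepting inputs that the old one rejected.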
*) @@ -89,39 +112,69 @@ let tests = [ mk __LINE__ None all_certificate_type (O.certificate_type_to_string, N.certificate_type_to_string) - ; mk __LINE__ None all_cls (O.class_to_string, N.class_to_string) + ; mk __LINE__ + (Some (O.string_to_class, N.cls_of_string)) + all_cls + (O.class_to_string, N.cls_to_string) ; mk __LINE__ None all_vm_power_state - (O.power_state_to_string, N.power_state_to_string) + (O.power_state_to_string, N.vm_power_state_to_string) + ; mk __LINE__ None all_vm_power_state + (O.power_to_string, N.vm_power_state_to_lowercase_string) ; mk __LINE__ None all_vm_operations (O.vm_operation_to_string, N.vm_operation_to_string) + ; mk __LINE__ + (Some (O.vm_uefi_mode_of_string, N.vm_uefi_mode_of_string)) + all_vm_uefi_mode + (N.vm_uefi_mode_to_string, N.vm_uefi_mode_to_string) + ; mk __LINE__ None all_vm_secureboot_readiness + (O.vm_secureboot_readiness_to_string, N.vm_secureboot_readiness_to_string) + ; mk __LINE__ None all_pool_guest_secureboot_readiness + ( O.pool_guest_secureboot_readiness_to_string + , N.pool_guest_secureboot_readiness_to_string + ) ; mk __LINE__ None all_pool_allowed_operations - (O.pool_operation_to_string, N.pool_operation_to_string) + (O.pool_operation_to_string, N.pool_allowed_operations_to_string) ; mk __LINE__ None all_host_allowed_operations (O.host_operation_to_string, N.host_operation_to_string) ; mk __LINE__ None all_update_guidances - (O.update_guidance_to_string, N.update_guidance_to_string) + (O.update_guidance_to_string, N.update_guidances_to_string) ; mk __LINE__ None all_latest_synced_updates_applied_state ( O.latest_synced_updates_applied_state_to_string , N.latest_synced_updates_applied_state_to_string ) ; mk __LINE__ None all_vdi_operations - (O.vdi_operation_to_string, N.vdi_operation_to_string) + (O.vdi_operation_to_string, N.vdi_operations_to_string) ; mk __LINE__ None all_storage_operations (O.sr_operation_to_string, N.sr_operation_to_string) ; mk __LINE__ None all_vbd_operations - (O.vbd_operation_to_string, N.vbd_operation_to_string) + (O.vbd_operation_to_string, N.vbd_operations_to_string) ; mk __LINE__ None all_vif_operations - (O.vif_operation_to_string, N.vif_operation_to_string) - ; mk __LINE__ None all_vif_locking_mode + (O.vif_operation_to_string, N.vif_operations_to_string) + ; mk __LINE__ + (Some (O.string_to_vif_locking_mode, N.vif_locking_mode_of_string)) + all_vif_locking_mode (O.vif_locking_mode_to_string, N.vif_locking_mode_to_string) - ; mk __LINE__ None all_vmss_type (O.vmss_type_to_string, N.vmss_type_to_string) - ; mk __LINE__ None all_vmss_frequency + ; mk __LINE__ + (Some (O.string_to_vmss_type, N.vmss_type_of_string)) + all_vmss_type + (O.vmss_type_to_string, N.vmss_type_to_string) + ; mk __LINE__ + (Some (O.string_to_vmss_frequency, N.vmss_frequency_of_string)) + all_vmss_frequency (O.vmss_frequency_to_string, N.vmss_frequency_to_string) - ; mk __LINE__ None all_network_default_locking_mode + ; mk __LINE__ + (Some + ( O.string_to_network_default_locking_mode + , N.network_default_locking_mode_of_string + ) + ) + all_network_default_locking_mode ( O.network_default_locking_mode_to_string , N.network_default_locking_mode_to_string ) - ; mk __LINE__ None all_network_purpose + ; mk __LINE__ + (Some (O.string_to_network_purpose, N.network_purpose_of_string)) + all_network_purpose (O.network_purpose_to_string, N.network_purpose_to_string) ; mk __LINE__ None all_vm_appliance_operation (O.vm_appliance_operation_to_string, N.vm_appliance_operation_to_string) @@ -135,12 +188,22 @@ let tests = ; mk __LINE__ None 
all_task_allowed_operations (O.task_allowed_operations_to_string, N.task_allowed_operations_to_string) (*; mk __LINE__ None all_alert_level (O.alert_level_to_string, N.alert_level_to_string)*) - ; mk __LINE__ None all_on_normal_exit + ; mk __LINE__ + (Some (O.string_to_on_normal_exit, N.on_normal_exit_of_string)) + all_on_normal_exit (O.on_normal_exit_to_string, N.on_normal_exit_to_string) - ; mk __LINE__ None all_on_crash_behaviour + ; mk __LINE__ + (Some (O.string_to_on_crash_behaviour, N.on_crash_behaviour_of_string)) + all_on_crash_behaviour (O.on_crash_behaviour_to_string, N.on_crash_behaviour_to_string) - ; mk __LINE__ None all_on_softreboot_behavior - (O.on_softreboot_behaviour_to_string, N.on_softreboot_behaviour_to_string) + ; mk __LINE__ + (Some + ( O.string_to_on_softreboot_behaviour + , N.on_softreboot_behavior_of_string + ) + ) + all_on_softreboot_behavior + (N.on_softreboot_behaviour_to_string, N.on_softreboot_behaviour_to_string) ; mk __LINE__ None all_host_display (O.host_display_to_string, N.host_display_to_string) ; mk __LINE__ @@ -157,9 +220,16 @@ let tests = ( O.host_numa_affinity_policy_to_string , N.host_numa_affinity_policy_to_string ) - ; mk __LINE__ None all_pgpu_dom0_access + ; mk __LINE__ None all_pci_dom0_access (O.pci_dom0_access_to_string, N.pci_dom0_access_to_string) - ; mk __LINE__ None all_vbd_mode (O.vbd_mode_to_string, N.vbd_mode_to_string) + ; mk __LINE__ + (Some (O.string_to_vbd_mode, N.vbd_mode_of_string)) + all_vbd_mode + (O.vbd_mode_to_string, N.vbd_mode_to_string) + ; mk __LINE__ + (Some (O.string_to_vbd_type, N.vbd_type_of_string)) + all_vbd_type + (N.vbd_type_to_string, N.vbd_type_to_string) (*; mk __LINE__ None all_power (O.power_to_string, N.power_to_string)*) ; mk __LINE__ None all_vdi_type (O.vdi_type_to_string, N.vdi_type_to_string) ; mk __LINE__ @@ -215,9 +285,9 @@ let tests = ; mk __LINE__ None all_cluster_host_operation (O.cluster_host_operation_to_string, N.cluster_host_operation_to_string) ; mk __LINE__ - (Some (O.sdn_protocol_of_string, N.sdn_protocol_of_string)) + (Some (O.sdn_protocol_of_string, N.sdn_controller_protocol_of_string)) all_sdn_controller_protocol - (O.sdn_protocol_to_string, N.sdn_protocol_to_string) + (O.sdn_protocol_to_string, N.sdn_controller_protocol_to_string) ; mk __LINE__ (Some (O.tunnel_protocol_of_string, N.tunnel_protocol_of_string)) all_tunnel_protocol @@ -225,12 +295,15 @@ let tests = ; mk __LINE__ None all_pif_igmp_status (O.pif_igmp_status_to_string, N.pif_igmp_status_to_string) ; mk __LINE__ None all_vusb_operations - (O.vusb_operation_to_string, N.vusb_operation_to_string) + (O.vusb_operation_to_string, N.vusb_operations_to_string) ; mk __LINE__ None all_sriov_configuration_mode ( O.network_sriov_configuration_mode_to_string - , N.network_sriov_configuration_mode_to_string + , N.sriov_configuration_mode_to_string ) - ; mk __LINE__ None all_on_boot (O.on_boot_to_string, N.on_boot_to_string) + ; mk __LINE__ + (Some (O.string_to_vdi_onboot, N.on_boot_of_string)) + all_on_boot + (O.on_boot_to_string, N.on_boot_to_string) ; mk __LINE__ None all_tristate_type (O.tristate_to_string, N.tristate_to_string) ; mk __LINE__ @@ -238,13 +311,18 @@ let tests = all_domain_type (O.domain_type_to_string, N.domain_type_to_string) ; mk __LINE__ None all_vtpm_operations - (O.vtpm_operation_to_string, N.vtpm_operation_to_string) + (O.vtpm_operation_to_string, N.vtpm_operations_to_string) ; mk __LINE__ (Some (O.update_sync_frequency_of_string, N.update_sync_frequency_of_string) ) all_update_sync_frequency 
(O.update_sync_frequency_to_string, N.update_sync_frequency_to_string) + ; mk __LINE__ + (Some (O.vm_placement_policy_of_string, N.vm_placement_policy_of_string)) + all_placement_policy + (O.vm_placement_policy_to_string, N.vm_placement_policy_to_string) + ; mk __LINE__ None all_origin (O.repo_origin_to_string, N.origin_to_string) ] |> List.concat diff --git a/ocaml/tests/test_client.ml b/ocaml/tests/test_client.ml index cdfa7690f79..1c3137721b8 100644 --- a/ocaml/tests/test_client.ml +++ b/ocaml/tests/test_client.ml @@ -13,7 +13,7 @@ let make_client_params ~__context = let rpc = Api_server.Server.dispatch_call req Unix.stdout in let session_id = let session_id = Ref.make () in - let now = Xapi_stdext_date.Date.of_float (Unix.time ()) in + let now = Xapi_stdext_date.Date.now () in let (_ : _ API.Ref.t) = Test_common.make_session ~__context ~ref:session_id ~this_host:(Helpers.get_localhost ~__context) diff --git a/ocaml/tests/test_features.ml b/ocaml/tests/test_features.ml index c2c4f5c25e8..53b167cc2f8 100644 --- a/ocaml/tests/test_features.ml +++ b/ocaml/tests/test_features.ml @@ -29,35 +29,46 @@ module OfAssocList = Generic.MakeStateless (struct let transform = of_assoc_list - (* Xen_motion and AD are enabled unless explicitly disabled. All other features - are disabled unless explitly enabled. *) + (* Some features are enabled unless explicitly disabled (see `enabled_when_unknown` + in features.ml). All other features are disabled unless explicitly enabled. *) let tests = `QuickAndAutoDocumented [ ([], [Xen_motion; AD; Updates; VM_start; VM_appliance_start]) ; ( [ ("restrict_xen_motion", "true") ; ("restrict_ad", "true") ; ("restrict_updates", "true") + ; ("restrict_vm_start", "true") + ; ("restrict_vm_appliance_start", "true") ] , [] ) - ; ([("restrict_xen_motion", "true")], [AD; Updates]) - ; ([("restrict_xen_motion", "false")], [Xen_motion; AD; Updates]) + ; ( [("restrict_xen_motion", "true")] , [AD; Updates; VM_start; VM_appliance_start] ) + ; ( [("restrict_xen_motion", "false")] , [Xen_motion; AD; Updates; VM_start; VM_appliance_start] ) ; ( [("restrict_xen_motion", "false"); ("restrict_dmc", "false")] - , [DMC; Xen_motion; AD; Updates] + , [DMC; Xen_motion; AD; Updates; VM_start; VM_appliance_start] ) ; ( [ ("restrict_xen_motion", "false") ; ("restrict_ad", "true") ; ("restrict_dmc", "false") ] - , [DMC; Xen_motion; Updates] + , [DMC; Xen_motion; Updates; VM_start; VM_appliance_start] ) ; ( [("enable_xha", "true"); ("restrict_xen_motion", "true")] - , [HA; AD; Updates] + , [HA; AD; Updates; VM_start; VM_appliance_start] + ) + ; ( [("restrict_updates", "true")] , [Xen_motion; AD; VM_start; VM_appliance_start] + ) + ; ( [("restrict_vm_start", "true")] , [Xen_motion; AD; Updates; VM_appliance_start] ) - ; ([("restrict_updates", "true")], [Xen_motion; AD]) ] end) diff --git a/ocaml/tests/test_observer.ml b/ocaml/tests/test_observer.ml index 322c586cb20..7ea23a05939 100644 --- a/ocaml/tests/test_observer.ml +++ b/ocaml/tests/test_observer.ml @@ -385,19 +385,15 @@ let test_all_spans_finish () = let _ = List.map (fun span -> Tracer.finish span) trace_spans in let remaining_spans, finished_spans = Spans.dump () in let result = - Hashtbl.fold - (fun k v acc -> - Option.fold ~none:0 ~some:List.length (Hashtbl.find_opt finished_spans k) - = List.length v - && acc - ) + TraceMap.fold + (fun _k v acc -> snd finished_spans = SpanMap.cardinal v && acc) active_spans true in Alcotest.(check bool) "All spans that are finished are moved to 
finished_spans" true result ; Alcotest.(check int) "traces with no spans are removed from the hashtable" 0 - (Hashtbl.length remaining_spans) ; + (TraceMap.cardinal remaining_spans) ; test_destroy ~__context ~self () let test_hashtbl_leaks () = @@ -440,8 +436,8 @@ let test_hashtbl_leaks () = let _, finished_spans = Spans.dump () in let filtered_spans_count = finished_spans - |> Hashtbl.to_seq_values - |> Seq.concat_map List.to_seq + |> fst + |> List.to_seq |> Seq.filter filter_export_spans |> Seq.length in @@ -587,6 +583,46 @@ let test_observed_components_of () = List.iter test_exp_comp expected_components_given_config_value ; observer_experimental_components := original_value +module type Id = sig + type t + + val compare : t -> t -> int + + val to_string : t -> string +end + +let testable_of_id (type a) (module I : Id with type t = a) = + let equal a b = I.compare a b = 0 and pp = Fmt.of_to_string I.to_string in + Alcotest.V1.testable pp equal + +let trace_id = testable_of_id (module Trace_id) + +let span_id = testable_of_id (module Span_id) + +let test_traceid () = + let expected = Trace_id.make () in + let str = expected |> Trace_id.to_string in + let actual = str |> Trace_id.of_string in + Alcotest.V1.check' trace_id ~expected ~actual ~msg:"roundtrip" ; + Alcotest.V1.(check' int ~expected:32 ~actual:(String.length str) ~msg:"length") + +let test_traceid' () = + let expected = "00000000000000010000000000000001" in + let actual = expected |> Trace_id.of_string |> Trace_id.to_string in + Alcotest.V1.(check' string ~expected ~actual ~msg:"roundtrip(str)") + +let test_spanid () = + let expected = Span_id.make () in + let str = expected |> Span_id.to_string in + let actual = str |> Span_id.of_string in + Alcotest.V1.check' span_id ~expected ~actual ~msg:"roundtrip" ; + Alcotest.V1.(check' int ~expected:16 ~actual:(String.length str) ~msg:"length") + +let test_spanid' () = + let expected = "0000000000000001" in + let actual = expected |> Span_id.of_string |> Span_id.to_string in + Alcotest.V1.(check' string ~expected ~actual ~msg:"roundtrip(str)") + let test = [ ( "test_observer_create_and_destroy" @@ -601,6 +637,10 @@ let test = ; ("test_tracing_exn_backtraces", `Quick, test_tracing_exn_backtraces) ; ("test_attribute_validation", `Quick, test_attribute_validation) ; ("test_observed_components_of", `Quick, test_observed_components_of) + ; ("test span_id", `Quick, test_spanid) + ; ("test trace_id", `Quick, test_traceid) + ; ("test span_id", `Quick, test_spanid') + ; ("test trace_id", `Quick, test_traceid') ] let () = diff --git a/ocaml/tests/test_pool_license.ml b/ocaml/tests/test_pool_license.ml index fbba9c74e36..aad9a145c11 100644 --- a/ocaml/tests/test_pool_license.ml +++ b/ocaml/tests/test_pool_license.ml @@ -29,22 +29,24 @@ let string_of_date_opt = function | None -> "None" | Some date -> - Printf.sprintf "Some %s" (Date.to_string date) + Printf.sprintf "Some %s" (Date.to_rfc3339 date) -let f2d = Date.of_float +let f2d = Date.of_unix_time -let f2d2s f = f |> Date.of_float |> Date.to_string +let f2d2s f = f |> Date.of_unix_time |> Date.to_rfc3339 let edition_to_int = [("edition1", 1); ("edition2", 2); ("edition3", 3)] module CompareDates = Generic.MakeStateless (struct module Io = struct - type input_t = Date.iso8601 option * Date.iso8601 option + type input_t = Date.t option * Date.t option type output_t = int let string_of_input_t = - Test_printers.(assoc_pair (option Date.to_string) (option Date.to_string)) + Test_printers.( + assoc_pair (option Date.to_rfc3339) (option 
Date.to_rfc3339) + ) let string_of_output_t = Test_printers.int end @@ -66,13 +68,13 @@ end) module PoolExpiryDate = Generic.MakeStateful (struct module Io = struct - type input_t = Date.iso8601 option list + type input_t = Date.t option list - type output_t = Date.iso8601 option + type output_t = Date.t option - let string_of_input_t = Test_printers.(list (option Date.to_string)) + let string_of_input_t = Test_printers.(list (option Date.to_rfc3339)) - let string_of_output_t = Test_printers.option Date.to_string + let string_of_output_t = Test_printers.option Date.to_rfc3339 end module State = Test_state.XapiDb @@ -86,7 +88,7 @@ module PoolExpiryDate = Generic.MakeStateful (struct | None -> [] | Some date -> - [("expiry", Date.to_string date)] + [("expiry", Date.to_rfc3339 date)] in let (_ : API.ref_host) = Test_common.make_host ~__context ~edition:"edition1" ~license_params @@ -201,10 +203,10 @@ module PoolLicenseState = Generic.MakeStateful (struct | None -> "never" | Some date -> - if date = Date.of_float License_check.never then + if date = Date.of_unix_time License_check.never then "never" else - Date.to_string date + Date.to_rfc3339 date in (pool_edition, pool_expiry) diff --git a/ocaml/tests/test_session.ml b/ocaml/tests/test_session.ml index 518dc221d72..4b441fc325b 100644 --- a/ocaml/tests/test_session.ml +++ b/ocaml/tests/test_session.ml @@ -1,8 +1,8 @@ module Date = Xapi_stdext_date.Date -let now = Date.of_string "2020-09-22T14:57:11Z" +let now = Date.of_iso8601 "2020-09-22T14:57:11Z" -let future = Date.of_string "2020-09-22T15:03:13Z" +let future = Date.of_iso8601 "2020-09-22T15:03:13Z" let fail_login ~__context ~uname ~originator ~now () = try diff --git a/ocaml/tests/test_updateinfo.ml b/ocaml/tests/test_updateinfo.ml index 2adb7c9d2db..6e05875e4f1 100644 --- a/ocaml/tests/test_updateinfo.ml +++ b/ocaml/tests/test_updateinfo.ml @@ -430,7 +430,7 @@ let fields_of_updateinfo = ) (list string) ; field "issued" - (fun (r : UpdateInfo.t) -> Xapi_stdext_date.Date.to_string r.issued) + (fun (r : UpdateInfo.t) -> Xapi_stdext_date.Date.to_rfc3339 r.issued) string ; field "severity" (fun (r : UpdateInfo.t) -> Severity.to_string r.severity) @@ -442,7 +442,11 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct module Io = struct type input_t = string - type output_t = ((string * UpdateInfo.t) list, exn) result + type output_t = + ( UpdateInfo.api_ver_t option * (UpdateInfo.id_t * UpdateInfo.t) list + , exn + ) + result let string_of_input_t s = s @@ -451,7 +455,10 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct str "%a" Dump.( result - ~ok:(list (pair string (record @@ fields_of_updateinfo))) + ~ok: + (pair (option string) + (list (pair string (record @@ fields_of_updateinfo))) + ) ~error:exn ) ) @@ -472,13 +479,22 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct , Error Api_errors.(Server_error (invalid_updateinfo_xml, [])) ) ; (* No update in updateinfo.xml *) - ({| + ( {| - |}, Ok []) + |} + , Ok (None, []) + ) + ; (* No update in updateinfo.xml, but with xapi-api-version *) + ( {| + + + |} + , Error Api_errors.(Server_error (invalid_updateinfo_xml, [])) + ) ; (* Missing update_type *) ( {| - + UPDATE-0000 title @@ -494,7 +510,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ) ; (* Missing id *) ( {| - + title summary @@ -509,7 +525,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ) ; (* Missing summary *) ( {| - + UPDATE-0000 title @@ -524,7 +540,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ) ; (* Missing description *) ( 
{| - + UPDATE-0000 title @@ -537,35 +553,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Duplicate update ID *) ( {| - + UPDATE-0000 title @@ -588,7 +606,99 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Error Api_errors.(Server_error (invalid_updateinfo_xml, [])) ) + ; (* Single update, without xapi-api-version *) + ( {| + + + UPDATE-0000 + title + summary + description + special information + https://update.details.info + + + High + + + |} + , Ok + ( None + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) + ) ; (* Single update *) + ( {| + + + UPDATE-0000 + title + summary + description + special information + https://update.details.info + + + High + + + |} + , Ok + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) + ) + ; (* Two updates, without xapi-api-version *) ( {| @@ -602,39 +712,76 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct High + + UPDATE-0001 + title + summary + description + special information + https://update.details.info + + + None + |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( None + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ; ( "UPDATE-0001" + , UpdateInfo. + { + id= "UPDATE-0001" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:50Z" + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Two updates *) ( {| - + UPDATE-0000 title @@ -660,60 +807,62 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ; ( "UPDATE-0001" - , UpdateInfo. - { - id= "UPDATE-0001" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:50Z" - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ; ( "UPDATE-0001" + , UpdateInfo. + { + id= "UPDATE-0001" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:50Z" + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Single update with deprecated guidances only *) ( {| - + UPDATE-0000 title @@ -747,56 +896,58 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= - [ - Applicability. 
- { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Gte - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ; Applicability. - { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Lt - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= + [ + Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Gte + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ; Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Lt + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with unknown guidance *) ( {| - + UPDATE-0000 title @@ -842,56 +993,58 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Recommended, [RebootHost]) - ; (Full, [RebootHost; RestartVM]) - ; (Mandatory, [RebootHost]) - ; (Livepatch, []) - ] - ; guidance_applicabilities= - [ - Applicability. - { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Gte - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ; Applicability. - { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Lt - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Recommended, [RebootHost]) + ; (Full, [RebootHost; RestartVM]) + ; (Mandatory, [RebootHost]) + ; (Livepatch, []) + ] + ; guidance_applicabilities= + [ + Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Gte + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ; Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Lt + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with livepatches and livepatch guidance *) ( {| - + UPDATE-0000 title @@ -916,58 +1069,60 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartToolstack]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= - [ - LivePatch. - { - component= Kernel - ; base_build_id= - "8346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.19.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ; LivePatch. - { - component= Kernel - ; base_build_id= - "9346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.20.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartToolstack]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= + [ + LivePatch. + { + component= Kernel + ; base_build_id= + "8346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.19.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ; LivePatch. + { + component= Kernel + ; base_build_id= + "9346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.20.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with livepatches and unknown livepatch guidance *) ( {| - + UPDATE-0000 title @@ -992,58 +1147,60 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RebootHost]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= - [ - LivePatch. - { - component= Kernel - ; base_build_id= - "8346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.19.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ; LivePatch. - { - component= Kernel - ; base_build_id= - "9346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.20.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RebootHost]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= + [ + LivePatch. 
+ { + component= Kernel + ; base_build_id= + "8346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.19.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ; LivePatch. + { + component= Kernel + ; base_build_id= + "9346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.20.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ] + ; issued= + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with livepatch guidance but empty livepatch *) ( {| - + UPDATE-0000 title @@ -1064,35 +1221,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartDeviceModel]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartDeviceModel]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Single update with valid livepatches *) ( {| - + UPDATE-0000 title @@ -1115,47 +1274,49 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartToolstack]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= - [ - LivePatch. - { - component= Kernel - ; base_build_id= - "9346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.20.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartToolstack]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= + [ + LivePatch. + { + component= Kernel + ; base_build_id= + "9346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.20.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Single update with invalid livepatches *) ( {| - + UPDATE-0000 title @@ -1178,35 +1339,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartToolstack]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartToolstack]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: empty guidance *) ( {| - + UPDATE-0000 title @@ -1227,35 +1390,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "empty guidance" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "empty guidance" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format only: empty guidance *) ( {| - + UPDATE-0000 title @@ -1273,35 +1438,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "guidance in new format only: empty guidance" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "guidance in new format only: empty guidance" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: empty mandatory and full *) ( {| - + UPDATE-0000 title @@ -1326,35 +1493,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "empty mandatory and full" - ; guidance= - [ - (Full, []) - ; (Mandatory, []) - ; (Recommended, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "empty mandatory and full" + ; guidance= + [ + (Full, []) + ; (Mandatory, []) + ; (Recommended, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: mandatory only *) ( {| - + UPDATE-0000 title @@ -1381,37 +1550,39 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "mandatory only" - ; guidance= - [ - ( Mandatory - , [RestartDeviceModel; EvacuateHost; RestartToolstack] - ) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "mandatory only" + ; guidance= + [ + ( Mandatory + , [RestartDeviceModel; EvacuateHost; RestartToolstack] + ) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: mandatory, recommended, full and livepatch *) ( {| - + UPDATE-0000 title @@ -1445,35 +1616,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "mandatory, recommended, full and livepatch" - ; guidance= - [ - (Full, [RebootHost]) - ; (Livepatch, [RestartDeviceModel]) - ; (Recommended, [EvacuateHost]) - ; (Mandatory, [RestartToolstack]) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "mandatory, recommended, full and livepatch" + ; guidance= + [ + (Full, [RebootHost]) + ; (Livepatch, [RestartDeviceModel]) + ; (Recommended, [EvacuateHost]) + ; (Mandatory, [RestartToolstack]) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: mandatory, recommended, full and livepatch *) ( {| - + UPDATE-0000 title @@ -1507,31 +1680,33 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "RestartVM in mandatory" - ; guidance= - [ - (Full, [RebootHost]) - ; (Livepatch, [RestartDeviceModel]) - ; (Recommended, [EvacuateHost]) - ; (Mandatory, [RestartVM]) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "RestartVM in mandatory" + ; guidance= + [ + (Full, [RebootHost]) + ; (Livepatch, [RestartDeviceModel]) + ; (Recommended, [EvacuateHost]) + ; (Mandatory, [RestartVM]) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ] end) diff --git a/ocaml/tests/test_xapi_db_upgrade.ml b/ocaml/tests/test_xapi_db_upgrade.ml index 6b488fec157..f14838ef6ab 100644 --- a/ocaml/tests/test_xapi_db_upgrade.ml +++ b/ocaml/tests/test_xapi_db_upgrade.ml @@ -38,14 +38,14 @@ let update_snapshots () = let a = T.make_vm ~__context ~name_label:"a" () in let a_snap = T.make_vm ~__context ~name_label:"a snap" () in Db.VM.set_snapshot_of ~__context ~self:a_snap ~value:a ; - Db.VM.set_snapshot_time ~__context ~self:a_snap ~value:(Date.of_float 1.) ; + Db.VM.set_snapshot_time ~__context ~self:a_snap ~value:(Date.of_unix_time 1.) ; let b = T.make_vm ~__context ~name_label:"b" () in let b_snap = T.make_vm ~__context ~name_label:"b snap" () in Db.VM.set_snapshot_of ~__context ~self:b_snap ~value:b ; - Db.VM.set_snapshot_time ~__context ~self:b_snap ~value:(Date.of_float 1.) ; + Db.VM.set_snapshot_time ~__context ~self:b_snap ~value:(Date.of_unix_time 1.) ; let b_snap2 = T.make_vm ~__context ~name_label:"b snap2" () in Db.VM.set_snapshot_of ~__context ~self:b_snap2 ~value:b ; - Db.VM.set_snapshot_time ~__context ~self:b_snap2 ~value:(Date.of_float 2.) ; + Db.VM.set_snapshot_time ~__context ~self:b_snap2 ~value:(Date.of_unix_time 2.) 
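The hunks above and below apply the same Xapi_stdext_date.Date renames used throughout this patch. A minimal side-by-side sketch of the mapping, assuming only the new API as it is exercised in this diff (the function name below is hypothetical):
```
(* Illustrative only: old Date names in comments, replacements in code. *)
let _date_migration_sketch () =
  let open Xapi_stdext_date in
  let now = Date.now () in                          (* was Date.of_float (Unix.time ()) *)
  let s = Date.to_rfc3339 now in                    (* was Date.to_string *)
  let t = Date.of_iso8601 "2023-05-12T08:37:49Z" in (* was Date.of_string *)
  let f = Date.to_unix_time t in                    (* was Date.to_float *)
  ignore (s, f) ;
  Date.epoch                                        (* was Date.never *)
```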
; X.update_snapshots.fn ~__context ; let check_vm = Alcotest.check Alcotest_comparators.(ref ()) in (* a.parent = a_snap *) diff --git a/ocaml/tests/tests/looper.py b/ocaml/tests/tests/looper.py deleted file mode 100755 index 8977fc6efec..00000000000 --- a/ocaml/tests/tests/looper.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python - -print "Program attempts to log into an XAPI server, fetch a list of VMs and" -print "then calls VM.get_otherConfig on the first one in a loop" -print -import getopt, sys, xapi - -url = "http://localhost:8086" #default -parsed = getopt.getopt(sys.argv[1:], "u:url") -if len(parsed[0]) == 1: - url = parsed[0][0][1] -print "Connecting to server on URL: ", url -print "(change with -u argument)" - -# Create an object to represent our server. -server = xapi.Server(url); - -# Call the server and get our result. -print "Logging in... ", -session = server.Session.login_with_password("user", "passwd", "1.0", "xen-api-tests-looper") -print "OK" -print "Session ID: \""+session+"\"" -vm_list = server.VM.get_all(session) - -print "VM list = " + repr(vm_list) - -for vm in vm_list: - print "VM ", vm, " in state: ", server.VM.get_power_state(session, vm) - -first_vm = vm_list[0] - -print "Getting the otherConfig of " + first_vm - -attempt = 0 -last = server.VM.get_otherConfig(session, first_vm) -while 1: - this = server.VM.get_otherConfig(session, first_vm) - if last <> this: - print "Got a different response!" - print "this = ", repr(this) - print "last = ", repr(last) - raise "Failed" - attempt = attempt + 1 - print attempt diff --git a/ocaml/tests/tests/looper2.py b/ocaml/tests/tests/looper2.py deleted file mode 100755 index 3e3395653ac..00000000000 --- a/ocaml/tests/tests/looper2.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/python - -print "Program attempts to log into an XAPI server to fetch a list of VMs and" -print "a list of debug objects. It then chooses the first debug object, " -print "queries the int->float map and then calls the 'recycle' message using" -print "that map as an argument" -print - -import getopt, sys, xapi - -url = "http://localhost:8086" #default -parsed = getopt.getopt(sys.argv[1:], "u:url") -if len(parsed[0]) == 1: - url = parsed[0][0][1] - -print "Connecting to server on URL: ", url -print "(change with -u argument)" - -# Create an object to represent our server. -server = xapi.Server(url); - -# Call the server and get our result. -print "Logging in... ", -session = server.Session.login_with_password("user", "passwd") -print "OK" -print "Session ID: \""+session+"\"" -vm_list = server.VM.get_all(session) - -print "VM list = " + repr(vm_list) - -for vm in vm_list: - print "VM ", vm, " in state: ", server.VM.get_power_state(session, vm) - -first_vm = vm_list[0] - -debug_objs = server.Debug.get_all(session) -debug = debug_objs[0] -ifm = server.Debug.get_int_float_map(session, debug) -print "Got an int->float map: " + repr(ifm) - -print "doing the int_float_map recycle thing" - -attempt = 0 -while 1: - this = server.Debug.recycle_int_float_map(ifm) - if ifm <> this: - print "Got a different response!" 
- print "this = ", repr(this) - print "ifm = ", repr(ifm) - raise "Failed" - attempt = attempt + 1 - print attempt diff --git a/ocaml/vhd-tool/test/dummy_extent_reader.py b/ocaml/vhd-tool/test/dummy_extent_reader.py index 1c344af40ef..b692674dded 100755 --- a/ocaml/vhd-tool/test/dummy_extent_reader.py +++ b/ocaml/vhd-tool/test/dummy_extent_reader.py @@ -1,10 +1,9 @@ -#!/usr/bin/python +#!/usr/bin/python3 """ Dummy extent reader that returns a huge extent list """ -from __future__ import print_function import json import sys diff --git a/ocaml/xapi-cli-server/cli_operations.ml b/ocaml/xapi-cli-server/cli_operations.ml index 7c693a7a25c..d0d981309da 100644 --- a/ocaml/xapi-cli-server/cli_operations.ml +++ b/ocaml/xapi-cli-server/cli_operations.ml @@ -1956,7 +1956,7 @@ let vdi_introduce printer rpc session_id params = let virtual_size = 0L and physical_utilisation = 0L in let metadata_of_pool = Ref.null in let is_a_snapshot = false in - let snapshot_time = Date.never in + let snapshot_time = Date.epoch in let snapshot_of = Ref.null in let vdi = Client.VDI.introduce ~rpc ~session_id ~uuid ~name_label ~name_description @@ -3210,7 +3210,7 @@ exception Multiple_failure of (string * string) list let format_message msg = Printf.sprintf "Message: time=%s priority=%Ld name='%s'" - (Date.to_string msg.API.message_timestamp) + (Date.to_rfc3339 msg.API.message_timestamp) msg.API.message_priority msg.API.message_name let wrap_op printer pri rpc session_id op e = @@ -3220,7 +3220,7 @@ let wrap_op printer pri rpc session_id op e = try Client.Message.get ~rpc ~session_id ~cls:`VM ~obj_uuid:(safe_get_field (field_lookup e.fields "uuid")) - ~since:(Date.of_float now) + ~since:(Date.of_unix_time now) with _ -> [] in List.iter @@ -5272,7 +5272,7 @@ let with_license_server_changes printer rpc session_id params hosts f = current_license_servers ; let alerts = Client.Message.get_since ~rpc ~session_id - ~since:(Date.of_float (now -. 1.)) + ~since:(Date.of_unix_time (now -. 1.)) in let print_if_checkout_error (ref, msg) = if @@ -6245,7 +6245,7 @@ let license_of_host rpc session_id host = let rstr = Features.of_assoc_list params in let expiry = if List.mem_assoc "expiry" params then - Date.to_float (Date.of_string (List.assoc "expiry" params)) + Date.to_unix_time (Date.of_iso8601 (List.assoc "expiry" params)) else 0. in @@ -6280,7 +6280,7 @@ let diagnostic_license_status printer rpc session_id _params = ; String.sub h.uuid 0 8 ; Features.to_compact_string h.rstr ; h.edition - ; Date.to_string (Date.of_float h.expiry) + ; Date.to_rfc3339 (Date.of_unix_time h.expiry) ; Printf.sprintf "%.1f" ((h.expiry -. now) /. (24. *. 60. *. 
60.)) ] ) @@ -7370,8 +7370,8 @@ let vmss_create printer rpc session_id params = failwith ("No default value for parameter " ^ param_name) in let name_label = List.assoc "name-label" params in - let ty = Record_util.string_to_vmss_type (get "type") in - let frequency = Record_util.string_to_vmss_frequency (get "frequency") in + let ty = Record_util.vmss_type_of_string (get "type") in + let frequency = Record_util.vmss_frequency_of_string (get "frequency") in let schedule = read_map_params "schedule" params in (* optional parameters with default values *) let name_description = get "name-description" ~default:"" in @@ -7753,7 +7753,8 @@ module SDN_controller = struct in let protocol = if List.mem_assoc "protocol" params then - Record_util.sdn_protocol_of_string (List.assoc "protocol" params) + Record_util.sdn_controller_protocol_of_string + (List.assoc "protocol" params) else `ssl in diff --git a/ocaml/xapi-cli-server/dune b/ocaml/xapi-cli-server/dune index ff3efb6c7b0..c1a8269dbb6 100644 --- a/ocaml/xapi-cli-server/dune +++ b/ocaml/xapi-cli-server/dune @@ -1,3 +1,13 @@ +(rule + (targets generated_record_utils.ml) + (deps + ../idl/ocaml_backend/gen_api_main.exe + ) + (action + (run %{deps} -filterinternal true -filter closed -mode utils -output + %{targets})) +) + (library (name xapi_cli_server) (modes best) @@ -35,7 +45,7 @@ xmlm xml-light2 ) - (preprocess (pps ppx_deriving_rpc)) + (preprocess (per_module ((pps ppx_deriving_rpc) Cli_operations))) (wrapped false) ) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 2c98955fffd..a7a4dd2ec72 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -12,83 +12,22 @@ * GNU Lesser General Public License for more details. *) (* conversion utils *) - -exception Record_failure of string - -let record_failure fmt = - Printf.ksprintf (fun msg -> raise (Record_failure msg)) fmt +(* NOTE: Unless conversion requires some custom logic, no new functions should + be added here. Automatically-generated functions with consistent behaviour + and naming are generated from the datamodel and included here. + If the custom logic is required, these functions should be shadowed and + justified here. + See: + _build/default/ocaml/xapi-cli-server/generated_record_utils.ml + for the generated code. And: + ~/xen-api/ocaml/idl/ocaml_backend/gen_api.ml + for the code generating it. 
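+   For example (illustrative): the datamodel enum behind VMSS types now
+   yields vmss_type_to_string and vmss_type_of_string here, replacing the
+   hand-written string_to_vmss_type that this patch deletes below.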
+*) + +include Generated_record_utils let to_str = function Rpc.String x -> x | _ -> failwith "Invalid" -let certificate_type_to_string = function - | `host -> - "host" - | `host_internal -> - "host_internal" - | `ca -> - "ca" - -let class_to_string cls = - match cls with - | `VM -> - "VM" - | `Host -> - "Host" - | `SR -> - "SR" - | `Pool -> - "Pool" - | `VMPP -> - "VMPP" - | `VMSS -> - "VMSS" - | `PVS_proxy -> - "PVS_proxy" - | `VDI -> - "VDI" - | `Certificate -> - "Certificate" - | _ -> - "unknown" - -let string_to_class str = - match str with - | "VM" -> - `VM - | "Host" -> - `Host - | "SR" -> - `SR - | "Pool" -> - `Pool - | "VMPP" -> - `VMPP - | "VMSS" -> - `VMSS - | "PVS_proxy" -> - `PVS_proxy - | "VDI" -> - `VDI - | "Certificate" -> - `Certificate - | _ -> - failwith "Bad type" - -let power_state_to_string state = - match state with - | `Halted -> - "Halted" - | `Paused -> - "Paused" - | `Running -> - "Running" - | `Suspended -> - "Suspended" - | `ShuttingDown -> - "Shutting down" - | `Migrating -> - "Migrating" - let vm_operation_table = [ (`assert_operation_valid, "assertoperationvalid") @@ -138,12 +77,17 @@ let vm_operation_table = ; (`create_vtpm, "create_vtpm") ] +(* Intentional shadowing - data_souces_op, assertoperationinvalid, + changing_vcpus, changing_memory_limits, query_services, create_template + are inconsistent *) let vm_operation_to_string x = if not (List.mem_assoc x vm_operation_table) then "(unknown operation)" else List.assoc x vm_operation_table +(* Intentional shadowing - + In addition to the above, also inconsistent exceptions *) let string_to_vm_operation x = let table = List.map (fun (a, b) -> (b, a)) vm_operation_table in if not (List.mem_assoc x table) then @@ -154,70 +98,8 @@ let string_to_vm_operation x = else List.assoc x table -let vm_uefi_mode_of_string = function - | "setup" -> - `setup - | "user" -> - `user - | s -> - record_failure "Expected 'user','setup', got %s" s - -let vm_secureboot_readiness_to_string = function - | `not_supported -> - "not_supported" - | `disabled -> - "disabled" - | `first_boot -> - "first_boot" - | `ready -> - "ready" - | `ready_no_dbx -> - "ready_no_dbx" - | `setup_mode -> - "setup_mode" - | `certs_incomplete -> - "certs_incomplete" - -let pool_guest_secureboot_readiness_to_string = function - | `ready -> - "ready" - | `ready_no_dbx -> - "ready_no_dbx" - | `not_ready -> - "not_ready" - -let pool_operation_to_string = function - | `ha_enable -> - "ha_enable" - | `ha_disable -> - "ha_disable" - | `cluster_create -> - "cluster_create" - | `designate_new_master -> - "designate_new_master" - | `tls_verification_enable -> - "tls_verification_enable" - | `configure_repositories -> - "configure_repositories" - | `sync_updates -> - "sync_updates" - | `sync_bundle -> - "sync_bundle" - | `get_updates -> - "get_updates" - | `apply_updates -> - "apply_updates" - | `cert_refresh -> - "cert_refresh" - | `exchange_certificates_on_join -> - "exchange_certificates_on_join" - | `exchange_ca_certificates_on_join -> - "exchange_ca_certificates_on_join" - | `copy_primary_host_certs -> - "copy_primary_host_certs" - | `eject -> - "eject" - +(* Intentional shadowing - inconsistent behaviour: + vm_start, vm_resume, vm_migrate *) let host_operation_to_string = function | `provision -> "provision" @@ -240,66 +122,7 @@ let host_operation_to_string = function | `enable -> "enable" -let update_guidance_to_string = function - | `reboot_host -> - "reboot_host" - | `reboot_host_on_livepatch_failure -> - "reboot_host_on_livepatch_failure" - | 
`reboot_host_on_kernel_livepatch_failure -> - "reboot_host_on_kernel_livepatch_failure" - | `reboot_host_on_xen_livepatch_failure -> - "reboot_host_on_xen_livepatch_failure" - | `restart_toolstack -> - "restart_toolstack" - | `restart_device_model -> - "restart_device_model" - | `restart_vm -> - "restart_vm" - -let latest_synced_updates_applied_state_to_string = function - | `yes -> - "yes" - | `no -> - "no" - | `unknown -> - "unknown" - -let vdi_operation_to_string : API.vdi_operations -> string = function - | `clone -> - "clone" - | `copy -> - "copy" - | `resize -> - "resize" - | `resize_online -> - "resize_online" - | `destroy -> - "destroy" - | `force_unlock -> - "force_unlock" - | `snapshot -> - "snapshot" - | `mirror -> - "mirror" - | `forget -> - "forget" - | `update -> - "update" - | `generate_config -> - "generate_config" - | `enable_cbt -> - "enable_cbt" - | `disable_cbt -> - "disable_cbt" - | `data_destroy -> - "data_destroy" - | `list_changed_blocks -> - "list_changed_blocks" - | `set_on_boot -> - "set_on_boot" - | `blocked -> - "blocked" - +(* Intentional shadowing - inconsistent behaviour around _/. *) let sr_operation_to_string : API.storage_operations -> string = function | `scan -> "scan" @@ -342,132 +165,7 @@ let sr_operation_to_string : API.storage_operations -> string = function | `pbd_destroy -> "PBD.destroy" -let vbd_operation_to_string = function - | `attach -> - "attach" - | `eject -> - "eject" - | `insert -> - "insert" - | `plug -> - "plug" - | `unplug -> - "unplug" - | `unplug_force -> - "unplug_force" - | `pause -> - "pause" - | `unpause -> - "unpause" - -let vif_operation_to_string = function - | `attach -> - "attach" - | `plug -> - "plug" - | `unplug -> - "unplug" - | `unplug_force -> - "unplug_force" - -let vif_locking_mode_to_string = function - | `network_default -> - "network_default" - | `locked -> - "locked" - | `unlocked -> - "unlocked" - | `disabled -> - "disabled" - -let string_to_vif_locking_mode = function - | "network_default" -> - `network_default - | "locked" -> - `locked - | "unlocked" -> - `unlocked - | "disabled" -> - `disabled - | s -> - record_failure - "Expected 'network_default', 'locked', 'unlocked', 'disabled', got %s" s - -let vmss_type_to_string = function - | `snapshot -> - "snapshot" - | `checkpoint -> - "checkpoint" - | `snapshot_with_quiesce -> - "snapshot_with_quiesce" - -let string_to_vmss_type = function - | "snapshot" -> - `snapshot - | "checkpoint" -> - `checkpoint - | "snapshot_with_quiesce" -> - `snapshot_with_quiesce - | s -> - record_failure - "Expected 'snapshot', 'checkpoint', 'snapshot_with_quiesce', got %s" s - -let vmss_frequency_to_string = function - | `hourly -> - "hourly" - | `daily -> - "daily" - | `weekly -> - "weekly" - -let string_to_vmss_frequency = function - | "hourly" -> - `hourly - | "daily" -> - `daily - | "weekly" -> - `weekly - | s -> - record_failure "Expected 'hourly', 'daily', 'weekly', got %s" s - -let network_default_locking_mode_to_string = function - | `unlocked -> - "unlocked" - | `disabled -> - "disabled" - -let string_to_network_default_locking_mode = function - | "unlocked" -> - `unlocked - | "disabled" -> - `disabled - | s -> - record_failure "Expected 'unlocked' or 'disabled', got %s" s - -let network_purpose_to_string : API.network_purpose -> string = function - | `nbd -> - "nbd" - | `insecure_nbd -> - "insecure_nbd" - -let string_to_network_purpose : string -> API.network_purpose = function - | "nbd" -> - `nbd - | "insecure_nbd" -> - `insecure_nbd - | s -> - record_failure "Expected a 
network purpose string; got %s" s - -let vm_appliance_operation_to_string = function - | `start -> - "start" - | `clean_shutdown -> - "clean_shutdown" - | `hard_shutdown -> - "hard_shutdown" - | `shutdown -> - "shutdown" - +(* Is not defined in the datamodel - only defined here *) let cpu_feature_to_string f = match f with | `FPU -> @@ -599,19 +297,7 @@ let cpu_feature_to_string f = | `VMX -> "VMX" -let task_status_type_to_string s = - match s with - | `pending -> - "pending" - | `success -> - "success" - | `failure -> - "failure" - | `cancelling -> - "cancelling" - | `cancelled -> - "cancelled" - +(* Intentional shadowing - inconsistent capitalization *) let protocol_to_string = function | `vt100 -> "VT100" @@ -620,32 +306,19 @@ let protocol_to_string = function | `rdp -> "RDP" -let telemetry_frequency_to_string = function - | `daily -> - "daily" - | `weekly -> - "weekly" - | `monthly -> - "monthly" - +(* Intentional shadowing - inconsistent capitalization *) let task_allowed_operations_to_string s = match s with `cancel -> "Cancel" | `destroy -> "Destroy" +(* Is not defined in the datamodel - only defined here *) let alert_level_to_string s = match s with `Info -> "info" | `Warn -> "warning" | `Error -> "error" +(* Intentional shadowing - inconsistent capitalization *) let on_normal_exit_to_string x = match x with `destroy -> "Destroy" | `restart -> "Restart" -let string_to_on_normal_exit s = - match String.lowercase_ascii s with - | "destroy" -> - `destroy - | "restart" -> - `restart - | _ -> - record_failure "Expected 'destroy' or 'restart', got %s" s - +(* Intentional shadowing - inconsistent capitalization *) let on_crash_behaviour_to_string x = match x with | `destroy -> @@ -661,27 +334,7 @@ let on_crash_behaviour_to_string x = | `rename_restart -> "Rename restart" -let string_to_on_crash_behaviour s = - match String.lowercase_ascii s with - | "destroy" -> - `destroy - | "coredump_and_destroy" -> - `coredump_and_destroy - | "restart" -> - `restart - | "coredump_and_restart" -> - `coredump_and_restart - | "preserve" -> - `preserve - | "rename_restart" -> - `rename_restart - | _ -> - record_failure - "Expected 'destroy', 'coredump_and_destroy', \ - 'restart','coredump_and_restart', 'preserve' or 'rename_restart', got \ - %s" - s - +(* Intentional shadowing - inconsistent capitalization *) let on_softreboot_behaviour_to_string x = match x with | `destroy -> @@ -693,73 +346,6 @@ let on_softreboot_behaviour_to_string x = | `soft_reboot -> "Soft reboot" -let string_to_on_softreboot_behaviour s = - match String.lowercase_ascii s with - | "destroy" -> - `destroy - | "restart" -> - `restart - | "preserve" -> - `preserve - | "soft_reboot" -> - `soft_reboot - | _ -> - record_failure - "Expected 'destroy', 'coredump_and_destroy', 'restart', \ - 'coredump_and_restart', 'preserve', 'soft_reboot' or \ - 'rename_restart', got %s" - s - -let host_display_to_string h = - match h with - | `enabled -> - "enabled" - | `enable_on_reboot -> - "enable_on_reboot" - | `disabled -> - "disabled" - | `disable_on_reboot -> - "disable_on_reboot" - -let host_sched_gran_of_string s = - match String.lowercase_ascii s with - | "core" -> - `core - | "cpu" -> - `cpu - | "socket" -> - `socket - | _ -> - record_failure "Expected 'core','cpu', 'socket', got %s" s - -let host_sched_gran_to_string = function - | `core -> - "core" - | `cpu -> - "cpu" - | `socket -> - "socket" - -let host_numa_affinity_policy_to_string = function - | `any -> - "any" - | `best_effort -> - "best_effort" - | `default_policy -> - "default_policy" 
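The converters deleted above are not lost: they are regenerated from the datamodel into generated_record_utils.ml with the uniform <type>_to_string / <type>_of_string naming, and only converters whose output must deviate from the generated form are kept as intentional shadows. A sketch of the pattern, using a hypothetical widget_mode enum that is not part of this patch:

    (* Pull in the mechanically generated converters. *)
    include Generated_record_utils

    (* Intentional shadowing - suppose the generated converter would print
       "fast_path", but the CLI historically prints a dash. This later
       definition takes precedence over the included one. `widget_mode`
       is hypothetical, for illustration only. *)
    let widget_mode_to_string = function
      | `fast_path -> "fast-path"
      | `slow_path -> "slow-path"
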
- -let host_numa_affinity_policy_of_string a = - match String.lowercase_ascii a with - | "any" -> - `any - | "best_effort" -> - `best_effort - | "default_policy" -> - `default_policy - | s -> - record_failure "Expected 'any', 'best_effort' or 'default_policy', got %s" - s - let pci_dom0_access_to_string x = host_display_to_string x let string_to_vdi_onboot s = @@ -771,43 +357,15 @@ let string_to_vdi_onboot s = | _ -> record_failure "Expected 'persist' or 'reset', got %s" s -let string_to_vbd_mode s = - match String.lowercase_ascii s with - | "ro" -> - `RO - | "rw" -> - `RW - | _ -> - record_failure "Expected 'RO' or 'RW', got %s" s - +(* Intentional shadowing - inconsistent capitalization *) let vbd_mode_to_string = function `RO -> "ro" | `RW -> "rw" -let string_to_vbd_type s = - match String.lowercase_ascii s with - | "cd" -> - `CD - | "disk" -> - `Disk - | "floppy" -> - `Floppy - | _ -> - record_failure "Expected 'CD' or 'Disk', got %s" s - -let power_to_string h = - match h with - | `Halted -> - "halted" - | `Paused -> - "paused" - | `Running -> - "running" - | `Suspended -> - "suspended" - | `ShuttingDown -> - "shutting down" - | `Migrating -> - "migrating" +(* Some usage sites rely on the output of the + conversion function to be lowercase*) +let vm_power_state_to_lowercase_string h = + vm_power_state_to_string h |> String.uncapitalize_ascii +(* Intentional shadowing - inconsistent capitalization *) let vdi_type_to_string t = match t with | `system -> @@ -833,93 +391,7 @@ let vdi_type_to_string t = | `cbt_metadata -> "CBT metadata" -let ip_configuration_mode_to_string = function - | `None -> - "None" - | `DHCP -> - "DHCP" - | `Static -> - "Static" - -let ip_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "dhcp" -> - `DHCP - | "none" -> - `None - | "static" -> - `Static - | s -> - record_failure "Expected 'dhcp','none' or 'static', got %s" s - -let vif_ipv4_configuration_mode_to_string = function - | `None -> - "None" - | `Static -> - "Static" - -let vif_ipv4_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "none" -> - `None - | "static" -> - `Static - | s -> - record_failure "Expected 'none' or 'static', got %s" s - -let ipv6_configuration_mode_to_string = function - | `None -> - "None" - | `DHCP -> - "DHCP" - | `Static -> - "Static" - | `Autoconf -> - "Autoconf" - -let ipv6_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "dhcp" -> - `DHCP - | "none" -> - `None - | "static" -> - `Static - | "autoconf" -> - `Autoconf - | s -> - record_failure "Expected 'dhcp','none' 'autoconf' or 'static', got %s" s - -let vif_ipv6_configuration_mode_to_string = function - | `None -> - "None" - | `Static -> - "Static" - -let vif_ipv6_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "none" -> - `None - | "static" -> - `Static - | s -> - record_failure "Expected 'none' or 'static', got %s" s - -let primary_address_type_to_string = function - | `IPv4 -> - "IPv4" - | `IPv6 -> - "IPv6" - -let primary_address_type_of_string m = - match String.lowercase_ascii m with - | "ipv4" -> - `IPv4 - | "ipv6" -> - `IPv6 - | s -> - record_failure "Expected 'ipv4' or 'ipv6', got %s" s - +(* Intentional shadowing - inconsistent underscore/dash *) let bond_mode_to_string = function | `balanceslb -> "balance-slb" @@ -928,6 +400,7 @@ let bond_mode_to_string = function | `lacp -> "lacp" +(* Intentional shadowing - inconsistent underscore/dash, custom case *) let bond_mode_of_string m = match String.lowercase_ascii m with | 
"balance-slb" | "" -> @@ -939,12 +412,14 @@ let bond_mode_of_string m = | s -> record_failure "Invalid bond mode. Got %s" s +(* Intentional shadowing - inconsistent underscore/dash *) let allocation_algorithm_to_string = function | `depth_first -> "depth-first" | `breadth_first -> "breadth-first" +(* Intentional shadowing - inconsistent underscore/dash *) let allocation_algorithm_of_string a = match String.lowercase_ascii a with | "depth-first" -> @@ -954,6 +429,7 @@ let allocation_algorithm_of_string a = | s -> record_failure "Invalid allocation algorithm. Got %s" s +(* Intentional shadowing - inconsistent underscore/dash *) let pvs_proxy_status_to_string = function | `stopped -> "stopped" @@ -981,57 +457,7 @@ let bool_of_string s = record_failure "Expected 'true','t','yes','y','1','false','f','no','n','0' got %s" s -let sdn_protocol_of_string s = - match String.lowercase_ascii s with - | "ssl" -> - `ssl - | "pssl" -> - `pssl - | _ -> - record_failure "Expected 'ssl','pssl', got %s" s - -let sdn_protocol_to_string = function `ssl -> "ssl" | `pssl -> "pssl" - -let tunnel_protocol_of_string s = - match String.lowercase_ascii s with - | "gre" -> - `gre - | "vxlan" -> - `vxlan - | _ -> - record_failure "Expected 'gre','vxlan', got %s" s - -let tunnel_protocol_to_string = function `gre -> "gre" | `vxlan -> "vxlan" - -let pif_igmp_status_to_string = function - | `enabled -> - "enabled" - | `disabled -> - "disabled" - | `unknown -> - "unknown" - -let vusb_operation_to_string = function - | `attach -> - "attach" - | `plug -> - "plug" - | `unplug -> - "unplug" - -let network_sriov_configuration_mode_to_string = function - | `sysfs -> - "sysfs" - | `modprobe -> - "modprobe" - | `manual -> - "manual" - | `unknown -> - "unknown" - -let on_boot_to_string onboot = - match onboot with `reset -> "reset" | `persist -> "persist" - +(* Intentional shadowing - inconsistent naming *) let tristate_to_string tristate = match tristate with | `yes -> @@ -1041,6 +467,7 @@ let tristate_to_string tristate = | `unspecified -> "unspecified" +(* Intentional shadowing - inconsistent underscore/dash *) let domain_type_to_string = function | `hvm -> "hvm" @@ -1053,6 +480,7 @@ let domain_type_to_string = function | `unspecified -> "unspecified" +(* Intentional shadowing - inconsistent underscore/dash *) let domain_type_of_string x = match String.lowercase_ascii x with | "hvm" -> @@ -1066,9 +494,6 @@ let domain_type_of_string x = | s -> record_failure "Invalid domain type. 
Got %s" s -let vtpm_operation_to_string (op : API.vtpm_operations) = - match op with `destroy -> "destroy" - (** parse [0-9]*(b|bytes|kib|mib|gib|tib)* to bytes *) let bytes_of_string str = let ( ** ) a b = Int64.mul a b in @@ -1110,27 +535,14 @@ let mac_from_int_array macs = (* generate a random mac that is locally administered *) let random_mac_local () = mac_from_int_array (Array.make 6 (Random.int 0x100)) -let update_sync_frequency_to_string = function - | `daily -> - "daily" - | `weekly -> - "weekly" - -let update_sync_frequency_of_string s = - match String.lowercase_ascii s with - | "daily" -> - `daily - | "weekly" -> - `weekly - | _ -> - record_failure "Expected 'daily', 'weekly', got %s" s - +(* Intentional shadowing - inconsistent underscore/dash *) let vm_placement_policy_to_string = function | `normal -> "normal" | `anti_affinity -> "anti-affinity" +(* Intentional shadowing - inconsistent underscore/dash *) let vm_placement_policy_of_string a = match String.lowercase_ascii a with | "normal" -> @@ -1139,5 +551,3 @@ let vm_placement_policy_of_string a = `anti_affinity | s -> record_failure "Invalid VM placement policy, got %s" s - -let repo_origin_to_string = function `remote -> "remote" | `bundle -> "bundle" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 426b04b758b..3798280d082 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -370,13 +370,13 @@ let message_record rpc session_id message = ~get:(fun () -> Int64.to_string (x ()).API.message_priority) () ; make_field ~name:"class" - ~get:(fun () -> Record_util.class_to_string (x ()).API.message_cls) + ~get:(fun () -> Record_util.cls_to_string (x ()).API.message_cls) () ; make_field ~name:"obj-uuid" ~get:(fun () -> (x ()).API.message_obj_uuid) () ; make_field ~name:"timestamp" - ~get:(fun () -> Date.to_string (x ()).API.message_timestamp) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.message_timestamp) () ; make_field ~name:"body" ~get:(fun () -> (x ()).API.message_body) () ] @@ -749,10 +749,10 @@ let task_record rpc session_id task = ; make_field ~name:"type" ~get:(fun () -> (x ()).API.task_type) () ; make_field ~name:"result" ~get:(fun () -> (x ()).API.task_result) () ; make_field ~name:"created" - ~get:(fun () -> Date.to_string (x ()).API.task_created) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.task_created) () ; make_field ~name:"finished" - ~get:(fun () -> Date.to_string (x ()).API.task_finished) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.task_finished) () ; make_field ~name:"error_info" ~get:(fun () -> concat_with_semi (x ()).API.task_error_info) @@ -816,23 +816,23 @@ let vif_record rpc session_id vif = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vif_operation_to_string + map_and_concat Record_util.vif_operations_to_string (x ()).API.vIF_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vif_operation_to_string + List.map Record_util.vif_operations_to_string (x ()).API.vIF_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vif_operation_to_string b) + (fun (_, b) -> Record_util.vif_operations_to_string b) (x ()).API.vIF_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vif_operation_to_string b) + (fun (_, b) -> Record_util.vif_operations_to_string b) (x ()).API.vIF_current_operations ) () @@ -920,7 +920,7 @@ let vif_record rpc session_id vif = ) ~set:(fun value -> 
Client.VIF.set_locking_mode ~rpc ~session_id ~self:vif - ~value:(Record_util.string_to_vif_locking_mode value) + ~value:(Record_util.vif_locking_mode_of_string value) ) () ; make_field ~name:"ipv4-allowed" @@ -1070,7 +1070,7 @@ let net_record rpc session_id net = ~set:(fun value -> Client.Network.set_default_locking_mode ~rpc ~session_id ~network:net - ~value:(Record_util.string_to_network_default_locking_mode value) + ~value:(Record_util.network_default_locking_mode_of_string value) ) () ; make_field ~name:"purpose" @@ -1084,11 +1084,11 @@ let net_record rpc session_id net = ) ~add_to_set:(fun s -> Client.Network.add_purpose ~rpc ~session_id ~self:net - ~value:(Record_util.string_to_network_purpose s) + ~value:(Record_util.network_purpose_of_string s) ) ~remove_from_set:(fun s -> Client.Network.remove_purpose ~rpc ~session_id ~self:net - ~value:(Record_util.string_to_network_purpose s) + ~value:(Record_util.network_purpose_of_string s) ) () ] @@ -1189,23 +1189,23 @@ let pool_record rpc session_id pool = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.pool_operation_to_string + map_and_concat Record_util.pool_allowed_operations_to_string (x ()).API.pool_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.pool_operation_to_string + List.map Record_util.pool_allowed_operations_to_string (x ()).API.pool_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.pool_operation_to_string b) + (fun (_, b) -> Record_util.pool_allowed_operations_to_string b) (x ()).API.pool_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.pool_operation_to_string b) + (fun (_, b) -> Record_util.pool_allowed_operations_to_string b) (x ()).API.pool_current_operations ) () @@ -1447,11 +1447,11 @@ let pool_record rpc session_id pool = () ; make_field ~name:"telemetry-next-collection" ~get:(fun () -> - (x ()).API.pool_telemetry_next_collection |> Date.to_string + (x ()).API.pool_telemetry_next_collection |> Date.to_rfc3339 ) () ; make_field ~name:"last-update-sync" - ~get:(fun () -> Date.to_string (x ()).API.pool_last_update_sync) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.pool_last_update_sync) () ; make_field ~name:"update-sync-frequency" ~get:(fun () -> @@ -1518,7 +1518,7 @@ let vmss_record rpc session_id vmss = ~get:(fun () -> Record_util.vmss_type_to_string (x ()).API.vMSS_type) ~set:(fun x -> Client.VMSS.set_type ~rpc ~session_id ~self:vmss - ~value:(Record_util.string_to_vmss_type x) + ~value:(Record_util.vmss_type_of_string x) ) () ; make_field ~name:"retained-snapshots" @@ -1536,7 +1536,7 @@ let vmss_record rpc session_id vmss = ) ~set:(fun x -> Client.VMSS.set_frequency ~rpc ~session_id ~self:vmss - ~value:(Record_util.string_to_vmss_frequency x) + ~value:(Record_util.vmss_frequency_of_string x) ) () ; make_field ~name:"schedule" @@ -1550,7 +1550,7 @@ let vmss_record rpc session_id vmss = ) () ; make_field ~name:"last-run-time" - ~get:(fun () -> Date.to_string (x ()).API.vMSS_last_run_time) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.vMSS_last_run_time) () ; make_field ~name:"VMs" ~get:(fun () -> @@ -1842,7 +1842,7 @@ let vm_record rpc session_id vm = ~get:(fun () -> get_uuids_from_refs (x ()).API.vM_snapshots) () ; make_field ~name:"snapshot-time" - ~get:(fun () -> Date.to_string (x ()).API.vM_snapshot_time) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.vM_snapshot_time) () ; make_field ~name:"transportable-snapshot-id" ~hidden:true ~get:(fun () -> (x 
()).API.vM_transportable_snapshot_id) @@ -1860,7 +1860,10 @@ let vm_record rpc session_id vm = ~get:(fun () -> string_of_bool (x ()).API.vM_is_control_domain) () ; make_field ~name:"power-state" - ~get:(fun () -> Record_util.power_to_string (x ()).API.vM_power_state) + ~get:(fun () -> + Record_util.vm_power_state_to_lowercase_string + (x ()).API.vM_power_state + ) () ; make_field ~name:"memory-actual" ~get:(fun () -> @@ -1959,7 +1962,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_shutdown ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_normal_exit x) + ~value:(Record_util.on_normal_exit_of_string x) ) () ; make_field ~name:"actions-after-softreboot" @@ -1969,7 +1972,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_softreboot ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_softreboot_behaviour x) + ~value:(Record_util.on_softreboot_behavior_of_string x) ) () ; make_field ~name:"actions-after-reboot" @@ -1979,7 +1982,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_reboot ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_normal_exit x) + ~value:(Record_util.on_normal_exit_of_string x) ) () ; make_field ~name:"actions-after-crash" @@ -1989,7 +1992,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_crash ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_crash_behaviour x) + ~value:(Record_util.on_crash_behaviour_of_string x) ) () ; make_field ~name:"console-uuids" @@ -2261,14 +2264,14 @@ let vm_record rpc session_id vm = ; make_field ~name:"start-time" ~get:(fun () -> Option.fold ~none:unknown_time - ~some:(fun m -> Date.to_string m.API.vM_metrics_start_time) + ~some:(fun m -> Date.to_rfc3339 m.API.vM_metrics_start_time) (xm ()) ) () ; make_field ~name:"install-time" ~get:(fun () -> Option.fold ~none:unknown_time - ~some:(fun m -> Date.to_string m.API.vM_metrics_install_time) + ~some:(fun m -> Date.to_rfc3339 m.API.vM_metrics_install_time) (xm ()) ) () @@ -2301,6 +2304,18 @@ let vm_record rpc session_id vm = (xgm ()) ) () + ; make_field ~name:"netbios-name" + ~get:(fun () -> + Option.fold ~none:nid + ~some:(fun m -> get_from_map m.API.vM_guest_metrics_netbios_name) + (xgm ()) + ) + ~get_map:(fun () -> + Option.fold ~none:[] + ~some:(fun m -> m.API.vM_guest_metrics_netbios_name) + (xgm ()) + ) + () ; make_field ~name:"PV-drivers-version" ~get:(fun () -> Option.fold ~none:nid @@ -2395,7 +2410,9 @@ let vm_record rpc session_id vm = ; make_field ~name:"guest-metrics-last-updated" ~get:(fun () -> Option.fold ~none:nid - ~some:(fun m -> Date.to_string m.API.vM_guest_metrics_last_updated) + ~some:(fun m -> + Date.to_rfc3339 m.API.vM_guest_metrics_last_updated + ) (xgm ()) ) () @@ -2544,7 +2561,7 @@ let vm_record rpc session_id vm = () ; make_field ~name:"pending-guidances" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.vM_pending_guidances ) () @@ -2553,13 +2570,13 @@ let vm_record rpc session_id vm = () ; make_field ~name:"pending-guidances-recommended" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.vM_pending_guidances_recommended ) () ; make_field ~name:"pending-guidances-full" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x 
()).API.vM_pending_guidances_full ) () @@ -2596,7 +2613,7 @@ let host_crashdump_record rpc session_id host = ~get:(fun () -> get_uuid_from_ref (x ()).API.host_crashdump_host) () ; make_field ~name:"timestamp" - ~get:(fun () -> Date.to_string (x ()).API.host_crashdump_timestamp) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.host_crashdump_timestamp) () ; make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.host_crashdump_size) @@ -3178,7 +3195,7 @@ let host_record rpc session_id host = () ; make_field ~name:"pending-guidances" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.host_pending_guidances ) () @@ -3188,7 +3205,7 @@ let host_record rpc session_id host = ) () ; make_field ~name:"last-software-update" - ~get:(fun () -> Date.to_string (x ()).API.host_last_software_update) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.host_last_software_update) () ; make_field ~name:"latest-synced-updates-applied" ~get:(fun () -> @@ -3198,13 +3215,13 @@ let host_record rpc session_id host = () ; make_field ~name:"pending-guidances-recommended" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.host_pending_guidances_recommended ) () ; make_field ~name:"pending-guidances-full" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.host_pending_guidances_full ) () @@ -3259,27 +3276,27 @@ let vdi_record rpc session_id vdi = ~get:(fun () -> get_uuids_from_refs (x ()).API.vDI_snapshots) () ; make_field ~name:"snapshot-time" - ~get:(fun () -> Date.to_string (x ()).API.vDI_snapshot_time) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.vDI_snapshot_time) () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vdi_operation_to_string + map_and_concat Record_util.vdi_operations_to_string (x ()).API.vDI_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vdi_operation_to_string + List.map Record_util.vdi_operations_to_string (x ()).API.vDI_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vdi_operation_to_string b) + (fun (_, b) -> Record_util.vdi_operations_to_string b) (x ()).API.vDI_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vdi_operation_to_string b) + (fun (_, b) -> Record_util.vdi_operations_to_string b) (x ()).API.vDI_current_operations ) () @@ -3356,7 +3373,7 @@ let vdi_record rpc session_id vdi = ~get:(fun () -> Record_util.on_boot_to_string (x ()).API.vDI_on_boot) ~set:(fun onboot -> Client.VDI.set_on_boot ~rpc ~session_id ~self:vdi - ~value:(Record_util.string_to_vdi_onboot onboot) + ~value:(Record_util.on_boot_of_string onboot) ) () ; make_field ~name:"allow-caching" @@ -3443,23 +3460,23 @@ let vbd_record rpc session_id vbd = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vbd_operation_to_string + map_and_concat Record_util.vbd_operations_to_string (x ()).API.vBD_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vbd_operation_to_string + List.map Record_util.vbd_operations_to_string (x ()).API.vBD_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vbd_operation_to_string b) + (fun (_, b) -> Record_util.vbd_operations_to_string b) (x ()).API.vBD_current_operations 
) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vbd_operation_to_string b) + (fun (_, b) -> Record_util.vbd_operations_to_string b) (x ()).API.vBD_current_operations ) () @@ -3486,7 +3503,7 @@ let vbd_record rpc session_id vbd = ) ~set:(fun mode -> Client.VBD.set_mode ~rpc ~session_id ~self:vbd - ~value:(Record_util.string_to_vbd_mode mode) + ~value:(Record_util.vbd_mode_of_string mode) ) () ; make_field ~name:"type" @@ -3501,7 +3518,7 @@ let vbd_record rpc session_id vbd = ) ~set:(fun ty -> Client.VBD.set_type ~rpc ~session_id ~self:vbd - ~value:(Record_util.string_to_vbd_type ty) + ~value:(Record_util.vbd_type_of_string ty) ) () ; make_field ~name:"unpluggable" @@ -4727,7 +4744,7 @@ let sdn_controller_record rpc session_id sdn_controller = () ; make_field ~name:"protocol" ~get:(fun () -> - Record_util.sdn_protocol_to_string + Record_util.sdn_controller_protocol_to_string (x ()).API.sDN_controller_protocol ) () @@ -4932,23 +4949,23 @@ let vusb_record rpc session_id vusb = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vusb_operation_to_string + map_and_concat Record_util.vusb_operations_to_string (x ()).API.vUSB_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vusb_operation_to_string + List.map Record_util.vusb_operations_to_string (x ()).API.vUSB_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vusb_operation_to_string b) + (fun (_, b) -> Record_util.vusb_operations_to_string b) (x ()).API.vUSB_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vusb_operation_to_string b) + (fun (_, b) -> Record_util.vusb_operations_to_string b) (x ()).API.vUSB_current_operations ) () @@ -5103,7 +5120,7 @@ let cluster_host_record rpc session_id cluster_host = () ; make_field ~name:"last-update-live" ~get:(fun () -> - (x ()).API.cluster_host_last_update_live |> Date.to_string + (x ()).API.cluster_host_last_update_live |> Date.to_rfc3339 ) () ; make_field ~name:"allowed-operations" @@ -5169,10 +5186,10 @@ let certificate_record rpc session_id certificate = ~get:(fun () -> (x ()).API.certificate_host |> get_uuid_from_ref) () ; make_field ~name:"not-before" - ~get:(fun () -> (x ()).API.certificate_not_before |> Date.to_string) + ~get:(fun () -> (x ()).API.certificate_not_before |> Date.to_rfc3339) () ; make_field ~name:"not-after" - ~get:(fun () -> (x ()).API.certificate_not_after |> Date.to_string) + ~get:(fun () -> (x ()).API.certificate_not_after |> Date.to_rfc3339) () ; make_field ~name:"fingerprint" ~get:(fun () -> (x ()).API.certificate_fingerprint) @@ -5242,7 +5259,7 @@ let repository_record rpc session_id repository = () ; make_field ~name:"origin" ~get:(fun () -> - Record_util.repo_origin_to_string (x ()).API.repository_origin + Record_util.origin_to_string (x ()).API.repository_origin ) () ] @@ -5292,11 +5309,11 @@ let vtpm_record rpc session_id vtpm = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vtpm_operation_to_string + map_and_concat Record_util.vtpm_operations_to_string (x ()).API.vTPM_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vtpm_operation_to_string + List.map Record_util.vtpm_operations_to_string (x ()).API.vTPM_allowed_operations ) () diff --git a/ocaml/xapi-consts/api_errors.ml b/ocaml/xapi-consts/api_errors.ml index 53d9684561f..97880cde57a 100644 --- a/ocaml/xapi-consts/api_errors.ml +++ b/ocaml/xapi-consts/api_errors.ml @@ -512,6 +512,8 @@ let 
sr_requires_upgrade = add_error "SR_REQUIRES_UPGRADE" let sr_is_cache_sr = add_error "SR_IS_CACHE_SR" +let sr_unhealthy = add_error "SR_UNHEALTHY" + let vdi_in_use = add_error "VDI_IN_USE" let vdi_is_sharable = add_error "VDI_IS_SHARABLE" diff --git a/ocaml/xapi-idl/dune b/ocaml/xapi-idl/dune new file mode 100644 index 00000000000..85c1a3c24e2 --- /dev/null +++ b/ocaml/xapi-idl/dune @@ -0,0 +1 @@ +(data_only_dirs designs xen-api-plugin) diff --git a/ocaml/xapi-idl/gpumon/dune b/ocaml/xapi-idl/gpumon/dune index ccd184e9098..de10e06dae6 100644 --- a/ocaml/xapi-idl/gpumon/dune +++ b/ocaml/xapi-idl/gpumon/dune @@ -11,7 +11,7 @@ xapi-log ) (wrapped false) - (preprocess (pps ppx_deriving_rpc))) + (preprocess (per_module ((pps ppx_deriving_rpc) Gpumon_interface)))) (executable (name gpumon_cli) diff --git a/ocaml/xapi-idl/lib_test/device_number_test.ml b/ocaml/xapi-idl/lib_test/device_number_test.ml index fc8d5b210f1..1e32115cd16 100644 --- a/ocaml/xapi-idl/lib_test/device_number_test.ml +++ b/ocaml/xapi-idl/lib_test/device_number_test.ml @@ -133,10 +133,13 @@ let test_2_way_convert = let original = of_disk_number hvm disk_number |> Option.get in let of_linux = of_linux_device (to_linux_device original) |> Option.get in let of_xenstore = of_xenstore_key (to_xenstore_key original) in - Alcotest.check device_number_equal_linux - "of_linux must be equal to original" original of_linux ; - Alcotest.check device_number "of_xenstore must be equal to original" - original of_xenstore + (* use ~pos instead of msg: a non-empty msg causes the formatter to be flushed, + and messages printed on stdout, which is very slow if we do this in a loop a million times + *) + Alcotest.check' ~pos:__POS__ ~msg:"" device_number_equal_linux + ~expected:original ~actual:of_linux ; + Alcotest.check' ~pos:__POS__ ~msg:"" device_number ~expected:original + ~actual:of_xenstore in let max_d = (1 lsl 20) - 1 in diff --git a/ocaml/xapi-idl/lib_test/dune b/ocaml/xapi-idl/lib_test/dune index 1b1e8193ca7..689abf9b5eb 100644 --- a/ocaml/xapi-idl/lib_test/dune +++ b/ocaml/xapi-idl/lib_test/dune @@ -1,3 +1,5 @@ +(data_only_dirs test_data) + (library (name test_lib) (modules idl_test_common) @@ -6,11 +8,9 @@ (wrapped false) ) -(test +(executable (name guard_interfaces_test) - (package xapi-idl) (modules guard_interfaces_test) - (deps (source_tree test_data)) (libraries test_lib xapi-idl.guard.privileged @@ -18,6 +18,13 @@ ) ) +(rule + (alias runtest) + (package xapi-idl) + (deps (:exe ./guard_interfaces_test.exe) (source_tree test_data/guard)) + (action (run %{exe})) +) + (test (name device_number_test) (package xapi-idl) @@ -34,7 +41,6 @@ (modes exe) (package xapi-idl) (modules (:standard \ idl_test_common guard_interfaces_test device_number_test)) - (deps (source_tree test_data)) (libraries alcotest cohttp_posix @@ -61,3 +67,10 @@ xapi-log ) (preprocess (per_module ((pps ppx_deriving_rpc) Task_server_test Updates_test)))) + +(rule + (alias runtest) + (package xapi-idl) + (deps (:exe ./test.exe) (source_tree test_data/guard)) + (action (run %{exe})) +) diff --git a/ocaml/xapi-idl/rrd/dune b/ocaml/xapi-idl/rrd/dune index 9462c9341e6..8a427a965e3 100644 --- a/ocaml/xapi-idl/rrd/dune +++ b/ocaml/xapi-idl/rrd/dune @@ -39,8 +39,7 @@ (re_export xapi-idl.rrd.interface.types) xapi-rrd ) - (wrapped false) - (preprocess (pps ppx_deriving_rpc))) + (wrapped false)) (executable (name rrd_cli) diff --git a/ocaml/xapi-idl/storage/dune b/ocaml/xapi-idl/storage/dune index 05f146429bc..1ca965d4368 100644 --- a/ocaml/xapi-idl/storage/dune +++ 
b/ocaml/xapi-idl/storage/dune @@ -10,7 +10,7 @@ xapi-idl ) (wrapped false) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + (preprocess (pps ppx_deriving_rpc))) (library (name xcp_storage_interface) @@ -28,7 +28,7 @@ xapi-log ) (wrapped false) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + (preprocess (pps ppx_deriving_rpc ppx_deriving.show))) (library (name xcp_storage) @@ -43,8 +43,7 @@ xapi-idl.storage.interface xapi-stdext-date ) - (wrapped false) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + (wrapped false)) (test (name storage_test) @@ -58,8 +57,7 @@ xapi-idl xapi-idl.storage xapi-idl.storage.interface - ) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + )) (test (name suite) diff --git a/ocaml/xapi-idl/storage/storage_interface.ml b/ocaml/xapi-idl/storage/storage_interface.ml index 698997ac0cd..f5bd93de60b 100644 --- a/ocaml/xapi-idl/storage/storage_interface.ml +++ b/ocaml/xapi-idl/storage/storage_interface.ml @@ -206,8 +206,7 @@ type vdi_info = { ; (* sm_config: workaround via XenAPI *) metadata_of_pool: string [@default ""] ; is_a_snapshot: bool [@default false] - ; snapshot_time: string - [@default Xapi_stdext_date.Date.to_string Xapi_stdext_date.Date.never] + ; snapshot_time: string [@default Xapi_stdext_date.Date.(to_rfc3339 epoch)] ; snapshot_of: Vdi.t [@default Vdi.of_string ""] ; (* managed: workaround via XenAPI *) read_only: bool [@default false] @@ -233,7 +232,7 @@ let default_vdi_info = failwith (Printf.sprintf "Error creating default_vdi_info: %s" m) type sr_health = Healthy | Recovering | Unreachable | Unavailable -[@@deriving rpcty] +[@@deriving rpcty, show {with_path= false}] type sr_info = { sr_uuid: string option @@ -354,6 +353,7 @@ module Errors = struct | Cancelled of string | Redirect of string option | Sr_attached of string + | Sr_unhealthy of string * sr_health | Unimplemented of string | Activated_on_another_host of uuid | Duplicated_key of string @@ -617,12 +617,24 @@ module StorageAPI (R : RPC) = struct let destroy = declare "SR.destroy" [] (dbg_p @-> sr_p @-> returning unit_p err) - (** [scan task sr] returns a list of VDIs contained within an attached SR *) + (** [scan task sr] returns a list of VDIs contained within an attached SR. + @deprecated This function is deprecated, and is only here to keep backward + compatibility with old xapis that call Remote.SR.scan during SXM. + Use the scan2 function instead. + *) let scan = let open TypeCombinators in let result = Param.mk ~name:"result" (list vdi_info) in declare "SR.scan" [] (dbg_p @-> sr_p @-> returning result err) + (** [scan2 task sr] returns a list of VDIs contained within an attached SR, + as well as the sr_info of the scanned [sr]. This operation is implemented as + a combination of scan and stats. *) + let scan2 = + let open TypeCombinators in + let result = Param.mk ~name:"result" (pair (list vdi_info, sr_info)) in + declare "SR.scan2" [] (dbg_p @-> sr_p @-> returning result err) + (** [update_snapshot_info_src sr vdi url dest dest_vdi snapshot_pairs] * updates the fields is_a_snapshot, snapshot_time and snapshot_of for a * list of snapshots on a remote SR. 
*) @@ -1160,6 +1172,8 @@ module type Server_impl = sig val scan : context -> dbg:debug_info -> sr:sr -> vdi_info list + val scan2 : context -> dbg:debug_info -> sr:sr -> vdi_info list * sr_info + val update_snapshot_info_src : context -> dbg:debug_info @@ -1449,6 +1463,7 @@ module Server (Impl : Server_impl) () = struct S.SR.reset (fun dbg sr -> Impl.SR.reset () ~dbg ~sr) ; S.SR.destroy (fun dbg sr -> Impl.SR.destroy () ~dbg ~sr) ; S.SR.scan (fun dbg sr -> Impl.SR.scan () ~dbg ~sr) ; + S.SR.scan2 (fun dbg sr -> Impl.SR.scan2 () ~dbg ~sr) ; S.SR.update_snapshot_info_src (fun dbg sr vdi url dest dest_vdi snapshot_pairs verify_dest -> Impl.SR.update_snapshot_info_src () ~dbg ~sr ~vdi ~url ~dest ~dest_vdi diff --git a/ocaml/xapi-idl/storage/storage_skeleton.ml b/ocaml/xapi-idl/storage/storage_skeleton.ml index e91246b3146..25283ed473b 100644 --- a/ocaml/xapi-idl/storage/storage_skeleton.ml +++ b/ocaml/xapi-idl/storage/storage_skeleton.ml @@ -68,6 +68,8 @@ module SR = struct let scan ctx ~dbg ~sr = u "SR.scan" + let scan2 ctx ~dbg ~sr = u "SR.scan2" + let update_snapshot_info_src ctx ~dbg ~sr ~vdi ~url ~dest ~dest_vdi ~snapshot_pairs = u "SR.update_snapshot_info_src" diff --git a/ocaml/xapi-idl/xen/dune b/ocaml/xapi-idl/xen/dune index 16ed23ecd22..83266865537 100644 --- a/ocaml/xapi-idl/xen/dune +++ b/ocaml/xapi-idl/xen/dune @@ -30,7 +30,7 @@ ) (flags (:standard -w -27)) (wrapped false) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv))) + (preprocess (pps ppx_deriving_rpc))) (library (name xcp_xen) @@ -44,5 +44,4 @@ xapi-idl xapi-idl.xen.interface ) - (wrapped false) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv))) + (wrapped false)) diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index e27762a2963..a3b86f166b4 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -13,6 +13,7 @@ message-switch-async message-switch-unix + ppx_deriving.runtime result rpclib.core rpclib.json @@ -71,3 +72,4 @@ (action (bash "export PYTHONPATH=../xapi-storage/python/; echo $PYTHONPATH; ./%{x} --root=$PWD/test --self-test-only=true")) ) +(data_only_dirs test examples) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate index 3115f233480..9cda8c0cf23 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate +++ b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") @@ -17,10 +17,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.activate(request) - print json.dumps(results) + print(json.dumps(results)) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach index db6eb6de2eb..6a2ec399460 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach +++ b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach @@ -1,15 +1,15 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") import xapi, d -import argparse, json, urlparse +import argparse, 
json, urllib.parse class Implementation(d.Datapath_skeleton): def attach(self, dbg, uri, domain): - u = urlparse.urlparse(uri) + u = urllib.parse.urlparse(uri) return { 'implementations': [ ['XenDisk', {"backend_type":"vbd", "extra":{}, "params":u.path}], ['BlockDevice', {"path":u.path}] ] } @@ -20,10 +20,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.attach(request) - print json.dumps(results) + print(json.dumps(results)) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate index 48240856deb..1585a267eb0 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate +++ b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") @@ -17,10 +17,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.deactivate(request) - print json.dumps(results) + print(json.dumps(results)) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach index aac2e9d3773..5e42f252943 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach +++ b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") @@ -17,10 +17,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.detach(request) - print json.dumps(results) + print(json.dumps(results)) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 7420545205f..cd6575bc9b3 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1207,6 +1207,82 @@ let bind ~volume_script_dir = |> wrap in S.SR.scan sr_scan_impl ; + let sr_scan2_impl dbg sr = + let sr_uuid = Storage_interface.Sr.string_of sr in + let get_sr_info sr = + return_volume_rpc (fun () -> Sr_client.stat (volume_rpc ~dbg) dbg sr) + >>>= fun response -> + Deferred.Result.return + { + Storage_interface.sr_uuid= response.Xapi_storage.Control.uuid + ; name_label= response.Xapi_storage.Control.name + ; name_description= response.Xapi_storage.Control.description + ; total_space= response.Xapi_storage.Control.total_space + ; free_space= response.Xapi_storage.Control.free_space + ; clustered= response.Xapi_storage.Control.clustered + ; health= + ( match response.Xapi_storage.Control.health with + | Xapi_storage.Control.Healthy _ -> + Healthy + | Xapi_storage.Control.Recovering _ -> + Recovering + | Xapi_storage.Control.Unreachable _ -> + 
Unreachable + | Xapi_storage.Control.Unavailable _ -> + Unavailable + ) + } + in + let get_volume_info sr sr_info = + return_volume_rpc (fun () -> + Sr_client.ls + (volume_rpc ~dbg ~compat_out:Compat.compat_out_volumes) + dbg sr + ) + >>>= fun response -> + let response = Array.to_list response in + (* Filter out volumes which are clone-on-boot transients *) + let transients = + List.fold + ~f:(fun set x -> + match + List.Assoc.find x.Xapi_storage.Control.keys _clone_on_boot_key + ~equal:String.equal + with + | None -> + set + | Some transient -> + Set.add set transient + ) + ~init:Core.String.Set.empty response + in + let response = + List.filter + ~f:(fun x -> not (Set.mem transients x.Xapi_storage.Control.key)) + response + in + Deferred.Result.return (List.map ~f:vdi_of_volume response, sr_info) + in + let rec stat_with_retry ?(times = 3) sr = + get_sr_info sr >>>= fun sr_info -> + match sr_info.health with + | Healthy -> + debug "%s sr %s is healthy" __FUNCTION__ sr_uuid ; + get_volume_info sr sr_info + | Unreachable when times > 0 -> + debug "%s: sr %s is unreachable, remaining %d retries" __FUNCTION__ + sr_uuid times ; + Clock.after Time.Span.second >>= fun () -> + stat_with_retry ~times:(times - 1) sr + | health -> + debug "%s: sr unhealthy because it is %s" __FUNCTION__ + (Storage_interface.show_sr_health health) ; + Deferred.Result.fail + Storage_interface.(Errors.Sr_unhealthy (sr_uuid, health)) + in + Attached_SRs.find sr >>>= stat_with_retry |> wrap + in + S.SR.scan2 sr_scan2_impl ; let vdi_create_impl dbg sr (vdi_info : Storage_interface.vdi_info) = Attached_SRs.find sr >>>= (fun sr -> @@ -1254,9 +1330,7 @@ let bind ~volume_script_dir = Volume_client.snapshot (volume_rpc ~dbg) dbg sr vdi ) >>>= fun response -> - let now = - Xapi_stdext_date.Date.(to_string (of_float (Unix.gettimeofday ()))) - in + let now = Xapi_stdext_date.Date.(to_rfc3339 (now ())) in set ~dbg ~sr ~vdi:response.Xapi_storage.Control.key ~key:_snapshot_time_key ~value:now >>>= fun () -> @@ -1863,10 +1937,7 @@ let self_test_plugin ~root_dir plugin = failwith "self test failed" let self_test ~root_dir = - ( self_test_plugin ~root_dir "org.xen.xapi.storage.dummy" >>>= fun () -> - self_test_plugin ~root_dir "org.xen.xapi.storage.dummyv5" - ) - >>= function + self_test_plugin ~root_dir "org.xen.xapi.storage.dummyv5" >>= function | Ok () -> info "test thread shutdown cleanly" ; Async_unix.exit 0 diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.Query b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.Query deleted file mode 120000 index 96bd1391c0e..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.Query +++ /dev/null @@ -1 +0,0 @@ -plugin.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.diagnostics b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.diagnostics deleted file mode 120000 index 96bd1391c0e..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.diagnostics +++ /dev/null @@ -1 +0,0 @@ -plugin.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.attach b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.attach deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.attach +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file 
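The sr_scan2_impl added above is the only nontrivial part of wiring up SR.scan2: it retries SR.stat while the SR reports Unreachable (up to three times, one second apart) and maps any other unhealthy state to the new Sr_unhealthy error. A synchronous simplification of that loop — get_sr_info, get_volume_info and uuid are parameters standing in for the Sr_client.stat / Sr_client.ls plumbing shown above:

    (* Hedged, synchronous sketch of the retry policy in sr_scan2_impl;
       the Async original waits Time.Span.second between attempts. *)
    let rec stat_with_retry ?(times = 3) ~get_sr_info ~get_volume_info ~uuid sr =
      let info : Storage_interface.sr_info = get_sr_info sr in
      match info.Storage_interface.health with
      | Storage_interface.Healthy ->
          Ok (get_volume_info sr info)
      | Storage_interface.Unreachable when times > 0 ->
          Unix.sleep 1 ;
          stat_with_retry ~times:(times - 1) ~get_sr_info ~get_volume_info ~uuid sr
      | health ->
          Error (Storage_interface.Errors.Sr_unhealthy (uuid, health))
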
diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.create b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.create deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.create +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.detach b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.detach deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.detach +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.ls b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.ls deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.ls +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.stat b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.stat deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.stat +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.create b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.create deleted file mode 120000 index 1d6acb7b332..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.create +++ /dev/null @@ -1 +0,0 @@ -volume.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.destroy b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.destroy deleted file mode 120000 index 1d6acb7b332..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.destroy +++ /dev/null @@ -1 +0,0 @@ -volume.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.stat b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.stat deleted file mode 120000 index 1d6acb7b332..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.stat +++ /dev/null @@ -1 +0,0 @@ -volume.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py deleted file mode 100755 index 08fb78407e0..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python2 - -""" - Copyright (C) Citrix Systems, Inc. 
-""" - -import os -import sys -import xapi.storage.api.plugin - - -class Implementation(xapi.storage.api.plugin.Plugin_skeleton): - - def diagnostics(self, dbg): - return "Dummy diagnostics" - - def query(self, dbg): - return { - "plugin": "dummy", - "name": "dummy SR plugin", - "description": ("Dummy SR for unit tests."), - "vendor": "Citrix Systems Inc", - "copyright": "(C) 2018 Citrix Inc", - "version": "1.0", - "required_api_version": "3.0", - "features": [ - "SR_ATTACH", - "SR_DETACH", - "SR_CREATE", - "VDI_CREATE", - "VDI_DESTROY"], - "configuration": {}, - "required_cluster_stack": []} - - -if __name__ == "__main__": - cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation()) - base = os.path.basename(sys.argv[0]) - if base == 'Plugin.diagnostics': - cmd.diagnostics() - elif base == 'Plugin.Query': - cmd.query() - else: - raise xapi.storage.api.plugin.Unimplemented(base) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py deleted file mode 100755 index 3cd7a211c8f..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python2 - -""" - Copyright (C) Citrix Systems, Inc. -""" - -import os -import sys -import urlparse -import xapi.storage.api.volume - -import plugin - - -class Implementation(xapi.storage.api.volume.SR_skeleton): - - def attach(self, dbg, uri): - return "file:///tmp/dummy" - - def create(self, dbg, uri, name, description, configuration): - return - - def detach(self, dbg, sr): - urlparse.urlparse(sr) - return - - def ls(self, dbg, sr): - urlparse.urlparse(sr) - qr = plugin.Implementation().query(dbg) - return [{ - "name": qr['name'], - "description": qr['description'], - "key": "file1", - "uuid": "file1", - "read_write": True, - "virtual_size": 0, - "physical_utilisation": 0, - "uri": ["raw+file:///tmp/disk.raw"], - "keys": {}, - }] - - def stat(self, dbg, sr): - urlparse.urlparse(sr) - qr = plugin.Implementation().query(dbg) - return { - "sr": sr, - "name": qr['name'], - "description": qr['description'], - "total_space": 0, - "free_space": 0, - "datasources": [], - "clustered": False, - "health": ["Healthy", ""] - } - - -if __name__ == "__main__": - cmd = xapi.storage.api.volume.SR_commandline(Implementation()) - base = os.path.basename(sys.argv[0]) - if base == 'SR.attach': - cmd.attach() - elif base == 'SR.create': - cmd.create() - elif base == 'SR.detach': - cmd.detach() - elif base == 'SR.ls': - cmd.ls() - elif base == 'SR.stat': - cmd.stat() - else: - raise xapi.storage.api.volume.Unimplemented(base) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py deleted file mode 100755 index 448ee6dcbc3..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python2 - -""" - Copyright (C) Citrix Systems, Inc. 
-""" - -import uuid -import urlparse -import os -import sys -import xapi.storage.api.volume -import xapi - -import plugin - - -class Implementation(xapi.storage.api.volume.Volume_skeleton): - - def create(self, dbg, sr, name, description, size): - urlparse.urlparse(sr) - voluuid = str(uuid.uuid4()) - return { - "name": name, - "description": description, - "key": voluuid, - "uuid": voluuid, - "read_write": True, - "virtual_size": 0, - "physical_utilisation": 0, - "uri": ["raw+file:///tmp/disk.raw"], - "keys": {}, - } - - def destroy(self, dbg, sr, key): - urlparse.urlparse(sr) - return - - def stat(self, dbg, sr, key): - urlparse.urlparse(sr) - qr = plugin.Implementation().query(dbg) - return { - "name": qr['name'], - "description": qr['description'], - "key": key, - "uuid": key, - "read_write": True, - "virtual_size": 0, - "physical_utilisation": 0, - "uri": ["raw+file:///tmp/disk.raw"], - "keys": {}, - } - - -if __name__ == "__main__": - cmd = xapi.storage.api.volume.Volume_commandline(Implementation()) - base = os.path.basename(sys.argv[0]) - if base == "Volume.create": - cmd.create() - elif base == "Volume.destroy": - cmd.destroy() - elif base == "Volume.stat": - cmd.stat() - else: - raise xapi.storage.api.volume.Unimplemented(base) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py index 5816f0dd217..bf54820cdc4 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. @@ -6,15 +6,15 @@ import os import sys -import xapi.storage.api.plugin +import xapi.storage.api.v5.plugin # pylint: disable=no-name-in-module -class Implementation(xapi.storage.api.plugin.Plugin_skeleton): +class Implementation(xapi.storage.api.v5.plugin.Plugin_skeleton): - def diagnostics(self, dbg): + def diagnostics(self, dbg): # pylint: disable=unused-argument return "Dummy diagnostics" - def query(self, dbg): + def query(self, dbg): # pylint: disable=unused-argument return { "plugin": "dummy", "name": "dummy SR plugin", @@ -35,11 +35,11 @@ def query(self, dbg): if __name__ == "__main__": - cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation()) + cmd = xapi.storage.api.v5.plugin.Plugin_commandline(Implementation()) base = os.path.basename(sys.argv[0]) if base == 'Plugin.diagnostics': cmd.diagnostics() elif base == 'Plugin.Query': cmd.query() else: - raise xapi.storage.api.plugin.Unimplemented(base) + raise xapi.storage.api.v5.plugin.Unimplemented(base) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py index 6100407e91d..3c649423d15 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. 
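The dummyv5 plugin above keeps the calling convention used by all of these scripts: the executable's basename (a symlink such as Plugin.Query) selects the method, and with --json a single JSON request is read from stdin and a JSON response is printed to stdout. A sketch of a driver under those assumptions, with ./Plugin.Query as a placeholder path to such a symlink:

    import json
    import subprocess

    request = {"dbg": "debug-context"}     # 'dbg' is the only required field here
    proc = subprocess.run(
        ["./Plugin.Query", "--json"],
        input=json.dumps(request) + "\n",  # one JSON object per line on stdin
        capture_output=True, text=True, check=True,
    )
    print(json.loads(proc.stdout))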
@@ -6,7 +6,7 @@
 
 import os
 import sys
-import urlparse
+import urllib.parse
 import xapi.storage.api.v5.volume
 
 import plugin
@@ -22,11 +22,11 @@ def create(self, dbg, uuid, configuration, name, description):
         return configuration
 
     def detach(self, dbg, sr):
-        urlparse.urlparse(sr)
+        urllib.parse.urlparse(sr)
         return
 
     def ls(self, dbg, sr):
-        urlparse.urlparse(sr)
+        urllib.parse.urlparse(sr)
         qr = plugin.Implementation().query(dbg)
         return [{
             "name": qr['name'],
@@ -42,7 +42,7 @@ def ls(self, dbg, sr):
         }]
 
     def stat(self, dbg, sr):
-        urlparse.urlparse(sr)
+        urllib.parse.urlparse(sr)
         qr = plugin.Implementation().query(dbg)
         return {
             "sr": sr,
diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py
index 20822dd8d73..fcf52ce3883 100755
--- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py
+++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py
@@ -1,11 +1,11 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 
 """
     Copyright (C) Citrix Systems, Inc.
 """
 
 import uuid
-import urlparse
+import urllib.parse
 import os
 import sys
 import xapi.storage.api.v5.volume
@@ -17,7 +17,7 @@ class Implementation(xapi.storage.api.v5.volume.Volume_skeleton):
 
     def create(self, dbg, sr, name, description, size, sharable):
-        urlparse.urlparse(sr)
+        urllib.parse.urlparse(sr)
         voluuid = str(uuid.uuid4())
         return {
             "name": name,
@@ -33,11 +33,11 @@ def create(self, dbg, sr, name, description, size, sharable):
         }
 
     def destroy(self, dbg, sr, key):
-        urlparse.urlparse(sr)
+        urllib.parse.urlparse(sr)
         return
 
     def stat(self, dbg, sr, key):
-        urlparse.urlparse(sr)
+        urllib.parse.urlparse(sr)
         qr = plugin.Implementation().query(dbg)
         return {
             "name": qr['name'],
diff --git a/ocaml/xapi-storage/dune b/ocaml/xapi-storage/dune
new file mode 100644
index 00000000000..f45f1f79866
--- /dev/null
+++ b/ocaml/xapi-storage/dune
@@ -0,0 +1 @@
+(data_only_dirs rpc-light)
diff --git a/ocaml/xapi-storage/generator/lib/control.ml b/ocaml/xapi-storage/generator/lib/control.ml
index e34c0183c36..e7f9274c48a 100644
--- a/ocaml/xapi-storage/generator/lib/control.ml
+++ b/ocaml/xapi-storage/generator/lib/control.ml
@@ -421,9 +421,15 @@ module Volume (R : RPC) = struct
   let compose =
     R.declare "compose"
       [
-        "[compose sr volume1 volume2] layers the updates from [volume2] onto"
-      ; "[volume1], modifying [volume2]. Implementations shall declare the"
-      ; "VDI_COMPOSE feature for this method to be supported."
+        "[compose sr child_volume parent_volume] layers the updates from "
+      ; "[child_volume] onto [parent_volume], modifying [child_volume]. "
+      ; "In the case of a delta file format this means updating the "
+      ; "[child_volume] to have a parent or backing object defined by "
+      ; "[parent_volume]. Implementations shall declare the VDI_COMPOSE "
+      ; "feature for this method to be supported. After a successful "
+      ; "return it should be assumed that the [parent_volume] is no "
+      ; "longer valid. Calling SR.ls will return the list of currently "
+      ; "known, valid volumes."
       ]
       (dbg @-> sr @-> key @-> key2 @-> returning unit errors)
diff --git a/ocaml/xapi-storage/generator/lib/data.ml b/ocaml/xapi-storage/generator/lib/data.ml
index 142848b4d6d..e4571892f71 100644
--- a/ocaml/xapi-storage/generator/lib/data.ml
+++ b/ocaml/xapi-storage/generator/lib/data.ml
@@ -68,6 +68,14 @@ let domain =
     ~description:["An opaque string which represents the Xen domain."]
     domain
 
+(** Path to a UNIX domain socket *)
+type sock_path = string [@@deriving rpcty]
+
+let sock_path =
+  Param.mk ~name:"sock_path"
+    ~description:["A path to a UNIX domain socket in the filesystem."]
+    sock_path
+
 open Idl
 
 module Datapath (R : RPC) = struct
@@ -132,6 +140,23 @@ module Datapath (R : RPC) = struct
       ]
       (dbg @-> uri_p @-> domain @-> returning unit error)
 
+  let import_activate =
+    declare "import_activate"
+      [
+        "[import_activate uri domain] prepares a connection to the "
+      ; "storage named by [uri] for use by inbound import mirroring. "
+      ; "The [domain] parameter identifies which domain to connect to, "
+      ; "most likely 0 or a custom storage domain. The return value is a "
+      ; "path to a UNIX domain socket to which an open file descriptor "
+      ; "may be passed by SCM_RIGHTS. This, in turn, will become "
+      ; "the server end of a Network Block Device (NBD) connection "
+      ; "using the new-fixed protocol. Implementations shall declare the "
+      ; "VDI_MIRROR_IN feature for this method to be supported. It is "
+      ; "expected that activate will have been previously called so that "
+      ; "there is an active datapath."
+      ]
+      (dbg @-> uri_p @-> domain @-> returning sock_path error)
+
   let deactivate =
     declare "deactivate"
       [
diff --git a/ocaml/xapi-storage/python/Makefile b/ocaml/xapi-storage/python/Makefile
index bc8eff9b851..a2ccad97c8c 100644
--- a/ocaml/xapi-storage/python/Makefile
+++ b/ocaml/xapi-storage/python/Makefile
@@ -1,5 +1,5 @@
 PREFIX?=/usr
-PYTHON?=python2
+PYTHON?=python3
 
 .PHONY: build release clean install uninstall
 
diff --git a/ocaml/xapi-storage/python/dune b/ocaml/xapi-storage/python/dune
new file mode 100644
index 00000000000..261d3661603
--- /dev/null
+++ b/ocaml/xapi-storage/python/dune
@@ -0,0 +1 @@
+(data_only_dirs examples)
diff --git a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py
index ed65d595477..10b1959e05c 100755
--- a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py
+++ b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
-# Copyright (C) Citrix Systems Inc.
+# Copyright (C) Cloud Software Group.
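For a delta file format, the compose semantics described in control.ml above come down to rewriting the child's backing reference to point at the parent. A sketch of what an implementation might do for qcow2-style volumes, where volume_path is a hypothetical helper mapping a volume key to its file:

    import subprocess

    def volume_path(sr, key):
        # Hypothetical layout: one qcow2 file per volume key under the SR.
        return "/var/lib/sr/{}/{}.qcow2".format(sr, key)

    def compose(dbg, sr, child, parent):
        # Metadata-only rebase: make [parent] the backing object of [child],
        # leaving [parent] itself untouched (and, per the contract above,
        # no longer valid to use independently).
        subprocess.check_call([
            "qemu-img", "rebase", "-u",
            "-b", volume_path(sr, parent),
            volume_path(sr, child),
        ])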
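The import_activate description above relies on file-descriptor passing: the caller connects to the returned UNIX domain socket and donates an open descriptor, which then becomes the server end of an NBD (new-fixed) session. On the caller's side that exchange looks roughly like this:

    import array
    import socket

    def send_nbd_fd(sock_path, fd):
        # sock_path is the value returned by Datapath.import_activate;
        # fd is an open descriptor to donate to the datapath plugin.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(sock_path)
            fds = array.array("i", [fd])
            # SCM_RIGHTS ancillary data must accompany at least one real byte.
            s.sendmsg([b"\0"],
                      [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds.tobytes())])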
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published @@ -15,10 +15,10 @@ # along with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -from __future__ import print_function + import os import sys -import urlparse +import urllib.parse import xapi.storage.api.v5.datapath from xapi.storage.common import call @@ -64,8 +64,8 @@ def activate(self, dbg, uri, domain): pass def attach(self, dbg, uri, domain): - parsed_url = urlparse.urlparse(uri) - query = urlparse.parse_qs(parsed_url.query) + parsed_url = urllib.parse.urlparse(uri) + query = urllib.parse.parse_qs(parsed_url.query) file_path = os.path.realpath(parsed_url.path) @@ -75,20 +75,22 @@ def attach(self, dbg, uri, domain): call(dbg, cmd) loop = Loop.from_path(dbg, file_path) + if not loop: + return {} return {"implementations": [ [ - 'XenDisk', + "XenDisk", { - 'backend_type': 'vbd', - 'params': loop.block_device(), - 'extra': {} + "backend_type": "vbd", + "params": loop.block_device(), + "extra": {} } ], [ - 'BlockDevice', + "BlockDevice", { - 'path': loop.block_device() + "path": loop.block_device() } ] ]} @@ -97,7 +99,7 @@ def deactivate(self, dbg, uri, domain): pass def detach(self, dbg, uri, domain): - parsed_url = urlparse.urlparse(uri) + parsed_url = urllib.parse.urlparse(uri) file_path = os.path.realpath(parsed_url.path) diff --git a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py index e16a53794a7..4cbc9939fbd 100755 --- a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py +++ b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published diff --git a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py index 61a41db978f..583043015ed 100755 --- a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py +++ b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published diff --git a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py index 35e96b6ab83..07f4f9c0436 100755 --- a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py +++ b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group, Inc.
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published @@ -18,8 +18,9 @@ from __future__ import print_function import os import sys -import urllib -import urlparse +import urllib.request +import urllib.parse +import urllib.error import xapi.storage.api.v5.volume from xapi import InternalError @@ -66,12 +67,12 @@ def attach(self, dbg, configuration): # As a simple "stateless" implementation, encode all the # configuration into the URI returned. This is passed back # into volume interface APIs and the stat and ls operations. - return urlparse.urlunparse(( + return urllib.parse.urlunparse(( 'file', '', configuration['path'], '', - urllib.urlencode(configuration, True), + urllib.parse.urlencode(configuration, True), None)) def detach(self, dbg, sr): @@ -96,8 +97,8 @@ def stat(self, dbg, sr): [stat sr] returns summary metadata associated with [sr]. Note this call does not return details of sub-volumes, see SR.ls. """ - parsed_url = urlparse.urlparse(sr) - config = urlparse.parse_qs(parsed_url.query) + parsed_url = urllib.parse.urlparse(sr) + config = urllib.parse.parse_qs(parsed_url.query) description = (config['description'][0] if 'description' in config diff --git a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py index d97ceb4ab5d..6593a8fd536 100755 --- a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py +++ b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group, Inc. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published @@ -21,8 +21,9 @@ import os import sys import uuid -import urllib -import urlparse +import urllib.request +import urllib.parse +import urllib.error import xapi.storage.api.v5.volume from xapi.storage import log @@ -31,8 +32,8 @@ class Implementation(xapi.storage.api.v5.volume.Volume_skeleton): def parse_sr(self, sr_uri): - parsed_url = urlparse.urlparse(sr_uri) - config = urlparse.parse_qs(parsed_url.query) + parsed_url = urllib.parse.urlparse(sr_uri) + config = urllib.parse.parse_qs(parsed_url.query) return parsed_url, config def create_volume_data(self, name, description, size, uris, uuid): @@ -50,8 +51,8 @@ def create_volume_data(self, name, description, size, uris, uuid): } def volume_uris(self, sr_path, name, size): - query = urllib.urlencode({'size': size}, True) - return [urlparse.urlunparse( + query = urllib.parse.urlencode({'size': size}, True) + return [urllib.parse.urlunparse( ('loop+blkback', None, os.path.join(sr_path, name), None, query, None))] @@ -187,7 +188,7 @@ def ls(self, dbg, sr): """ [ls sr] lists the volumes from [sr] """ - parsed_url = urlparse.urlparse(sr) + parsed_url = urllib.parse.urlparse(sr) sr_path = parsed_url.path files = glob.glob(os.path.join(sr_path, '*.inf')) log.debug('files to list {}'.format(files)) diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index 0027af213bf..0f7c2a13de3 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -1,7 +1,7 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ -Copyright (c) 2013-2018, Citrix Inc. +Copyright (c) 2013-2024, Cloud Software Group, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without @@ -25,7 +25,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" -from __future__ import print_function import sys import traceback import json @@ -67,7 +66,7 @@ class XenAPIException(Exception): def __init__(self, code, params): Exception.__init__(self) - if not isinstance(code, str) and not isinstance(code, unicode): + if not isinstance(code, str): raise TypeError("string", repr(code)) if not isinstance(params, list): raise TypeError("list", repr(params)) @@ -119,7 +118,7 @@ def __init__(self, thing, ty, desc): "UnmarshalException thing=%s ty=%s desc=%s" % (thing, ty, desc)) -class TypeError(InternalError): +class TypeError(InternalError): # pylint: disable=redefined-builtin def __init__(self, expected, actual): InternalError.__init__( @@ -134,7 +133,8 @@ def __init__(self, name): def is_long(x): try: - long(x) + # Python3 int is long, keep the name for interface compatibility + int(x) return True except ValueError: return False diff --git a/ocaml/xapi-storage/python/xapi/storage/__init__.py b/ocaml/xapi-storage/python/xapi/storage/__init__.py index 18ff5363796..e5a0d9b4834 100644 --- a/ocaml/xapi-storage/python/xapi/storage/__init__.py +++ b/ocaml/xapi-storage/python/xapi/storage/__init__.py @@ -1 +1 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 diff --git a/ocaml/xapi-storage/python/xapi/storage/api/__init__.py b/ocaml/xapi-storage/python/xapi/storage/api/__init__.py index 18ff5363796..e5a0d9b4834 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/__init__.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/__init__.py @@ -1 +1 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 diff --git a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py deleted file mode 100644 index 1d5b43b0dca..00000000000 --- a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py +++ /dev/null @@ -1,401 +0,0 @@ -from __future__ import print_function -from xapi import success, Rpc_light_failure, InternalError, UnmarshalException, TypeError, is_long, UnknownMethod -import xapi -import sys -import json -import argparse -import traceback -import logging -class Unimplemented(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Datapath_server_dispatcher: - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. 
Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def open(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - if not('persistent' in args): - raise UnmarshalException('argument missing', 'persistent', '') - persistent = args["persistent"] - if not isinstance(persistent, bool): - raise TypeError("bool", repr(persistent)) - results = self._impl.open(dbg, uri, persistent) - return results - def attach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): - raise TypeError("string", repr(domain)) - results = self._impl.attach(dbg, uri, domain) - if not isinstance(results['domain_uuid'], str) and not isinstance(results['domain_uuid'], unicode): - raise TypeError("string", repr(results['domain_uuid'])) - if results['implementation'][0] == 'Blkback': - if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode): - raise TypeError("string", repr(results['implementation'][1])) - elif results['implementation'][0] == 'Tapdisk3': - if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode): - raise TypeError("string", repr(results['implementation'][1])) - elif results['implementation'][0] == 'Qdisk': - if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode): - raise TypeError("string", repr(results['implementation'][1])) - return results - def activate(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise 
UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): - raise TypeError("string", repr(domain)) - results = self._impl.activate(dbg, uri, domain) - return results - def deactivate(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): - raise TypeError("string", repr(domain)) - results = self._impl.deactivate(dbg, uri, domain) - return results - def detach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): - raise TypeError("string", repr(domain)) - results = self._impl.detach(dbg, uri, domain) - return results - def close(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - results = self._impl.close(dbg, uri) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "Datapath.open": - return success(self.open(args)) - elif method == "Datapath.attach": - return success(self.attach(args)) - elif method == "Datapath.activate": - return success(self.activate(args)) - elif method == "Datapath.deactivate": - return success(self.deactivate(args)) - elif method == "Datapath.detach": - return success(self.detach(args)) - elif method == "Datapath.close": - return success(self.close(args)) -class Datapath_skeleton: - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. 
Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - def __init__(self): - pass - def open(self, dbg, uri, persistent): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.open") - def attach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.attach") - def activate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.activate") - def deactivate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.deactivate") - def detach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.detach") - def close(self, dbg, uri): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.close") -class Datapath_test: - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - def __init__(self): - pass - def open(self, dbg, uri, persistent): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def attach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - result["backend"] = { "domain_uuid": "string", "implementation": None } - return result - def activate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. 
Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def deactivate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def detach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def close(self, dbg, uri): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result -class Datapath_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = Datapath_server_dispatcher(self.impl) - def _parse_open(self): - """[open uri persistent] is called before a disk is attached to a VM. If persistent is true then care should be taken to persist all writes to the disk. If persistent is false then the implementation should configure a temporary location for writes so they can be thrown away on [close].""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[open uri persistent] is called before a disk is attached to a VM. If persistent is true then care should be taken to persist all writes to the disk. If persistent is false then the implementation should configure a temporary location for writes so they can be thrown away on [close].') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('--persistent', action='store_true', help='True means the disk data is persistent and should be preserved when the datapath is closed i.e. when a VM is shutdown or rebooted. False means the data should be thrown away when the VM is shutdown or rebooted.') - return vars(parser.parse_args()) - def _parse_attach(self): - """[attach uri domain] prepares a connection between the storage named by [uri] and the Xen domain with id [domain]. The return value is the information needed by the Xen toolstack to setup the shared-memory blkfront protocol. Note that the same volume may be simultaneously attached to multiple hosts for example over a migrate. If an implementation needs to perform an explicit handover, then it should implement [activate] and [deactivate]. 
This function is idempotent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[attach uri domain] prepares a connection between the storage named by [uri] and the Xen domain with id [domain]. The return value is the information needed by the Xen toolstack to setup the shared-memory blkfront protocol. Note that the same volume may be simultaneously attached to multiple hosts for example over a migrate. If an implementation needs to perform an explicit handover, then it should implement [activate] and [deactivate]. This function is idempotent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_activate(self): - """[activate uri domain] is called just before a VM needs to read or write its disk. This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. This function is idempotent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[activate uri domain] is called just before a VM needs to read or write its disk. This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. This function is idempotent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_deactivate(self): - """[deactivate uri domain] is called as soon as a VM has finished reading or writing its disk. This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. This function is idempotent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[deactivate uri domain] is called as soon as a VM has finished reading or writing its disk. This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. 
This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. This function is idempotent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_detach(self): - """[detach uri domain] is called sometime after a VM has finished reading or writing its disk. This is an opportunity to clean up any resources associated with the disk. This function is called outside the migration downtime window so can be slow without affecting users. This function is idempotent. This function should never fail. If an implementation is unable to perform some cleanup right away then it should queue the action internally. Any error result represents a bug in the implementation.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[detach uri domain] is called sometime after a VM has finished reading or writing its disk. This is an opportunity to clean up any resources associated with the disk. This function is called outside the migration downtime window so can be slow without affecting users. This function is idempotent. This function should never fail. If an implementation is unable to perform some cleanup right away then it should queue the action internally. Any error result represents a bug in the implementation.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_close(self): - """[close uri] is called after a disk is detached and a VM shutdown. This is an opportunity to throw away writes if the disk is not persistent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[close uri] is called after a disk is detached and a VM shutdown. 
This is an opportunity to throw away writes if the disk is not persistent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - return vars(parser.parse_args()) - def open(self): - use_json = False - try: - request = self._parse_open() - use_json = 'json' in request and request['json'] - results = self.dispatcher.open(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def attach(self): - use_json = False - try: - request = self._parse_attach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.attach(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def activate(self): - use_json = False - try: - request = self._parse_activate() - use_json = 'json' in request and request['json'] - results = self.dispatcher.activate(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def deactivate(self): - use_json = False - try: - request = self._parse_deactivate() - use_json = 'json' in request and request['json'] - results = self.dispatcher.deactivate(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def detach(self): - use_json = False - try: - request = self._parse_detach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.detach(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def close(self): - use_json = False - try: - request = self._parse_close() - use_json = 'json' in request and request['json'] - results = self.dispatcher.close(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class datapath_server_dispatcher: - """Demux calls to individual interface server_dispatchers""" - def __init__(self, Datapath=None): - self.Datapath = Datapath - def _dispatch(self, method, params): - try: - logging.debug("method = %s params = %s" % (method, repr(params))) - if method.startswith("Datapath") and self.Datapath: - return self.Datapath._dispatch(method, params) - raise UnknownMethod(method) - except Exception as e: - logging.info("caught %s" % e) - traceback.print_exc() - try: - # A declared (expected) failure will have a .failure() method - logging.debug("returning %s" % (repr(e.failure()))) - return e.failure() - except AttributeError: - # An undeclared (unexpected) failure is wrapped as InternalError - return (InternalError(str(e)).failure()) -class datapath_server_test(datapath_server_dispatcher): - """Create a server which will respond to all calls, returning arbitrary values. 
This is intended as a marshal/unmarshal test.""" - def __init__(self): - datapath_server_dispatcher.__init__(self, Datapath_server_dispatcher(Datapath_test())) \ No newline at end of file diff --git a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py deleted file mode 100644 index 0185d900148..00000000000 --- a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py +++ /dev/null @@ -1,233 +0,0 @@ -from __future__ import print_function -from xapi import success, Rpc_light_failure, InternalError, UnmarshalException, TypeError, is_long, UnknownMethod -import xapi -import sys -import json -import argparse -import traceback -import logging -class Unimplemented(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Plugin_server_dispatcher: - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def query(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - results = self._impl.query(dbg) - if not isinstance(results['plugin'], str) and not isinstance(results['plugin'], unicode): - raise TypeError("string", repr(results['plugin'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): - raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['vendor'], str) and not isinstance(results['vendor'], unicode): - raise TypeError("string", repr(results['vendor'])) - if not isinstance(results['copyright'], str) and not isinstance(results['copyright'], unicode): - raise TypeError("string", repr(results['copyright'])) - if not isinstance(results['version'], str) and not isinstance(results['version'], unicode): - raise TypeError("string", repr(results['version'])) - if not isinstance(results['required_api_version'], str) and not isinstance(results['required_api_version'], unicode): - raise TypeError("string", repr(results['required_api_version'])) - if not isinstance(results['features'], list): - raise TypeError("string list", repr(results['features'])) - for tmp_1 in results['features']: - if not isinstance(tmp_1, str) and not isinstance(tmp_1, unicode): - raise TypeError("string", repr(tmp_1)) - if not isinstance(results['configuration'], dict): - raise TypeError("(string * string) list", repr(results['configuration'])) - for tmp_2 in results['configuration'].keys(): - if not isinstance(tmp_2, str) and not isinstance(tmp_2, unicode): - raise TypeError("string", repr(tmp_2)) - for tmp_2 in results['configuration'].values(): - if not isinstance(tmp_2, str) and not isinstance(tmp_2, unicode): - raise TypeError("string", repr(tmp_2)) - if not isinstance(results['required_cluster_stack'], list): - 
raise TypeError("string list", repr(results['required_cluster_stack'])) - for tmp_3 in results['required_cluster_stack']: - if not isinstance(tmp_3, str) and not isinstance(tmp_3, unicode): - raise TypeError("string", repr(tmp_3)) - return results - def ls(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - results = self._impl.ls(dbg) - if not isinstance(results, list): - raise TypeError("string list", repr(results)) - for tmp_4 in results: - if not isinstance(tmp_4, str) and not isinstance(tmp_4, unicode): - raise TypeError("string", repr(tmp_4)) - return results - def diagnostics(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - results = self._impl.diagnostics(dbg) - if not isinstance(results, str) and not isinstance(results, unicode): - raise TypeError("string", repr(results)) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "Plugin.query": - return success(self.query(args)) - elif method == "Plugin.ls": - return success(self.ls(args)) - elif method == "Plugin.diagnostics": - return success(self.diagnostics(args)) -class Plugin_skeleton: - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - def __init__(self): - pass - def query(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - raise Unimplemented("Plugin.query") - def ls(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - raise Unimplemented("Plugin.ls") - def diagnostics(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - raise Unimplemented("Plugin.diagnostics") -class Plugin_test: - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - def __init__(self): - pass - def query(self, dbg): - """Discover properties of this implementation. 
Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - result = {} - result["query_result"] = { "plugin": "string", "name": "string", "description": "string", "vendor": "string", "copyright": "string", "version": "string", "required_api_version": "string", "features": [ "string", "string" ], "configuration": { "string": "string" }, "required_cluster_stack": [ "string", "string" ] } - return result - def ls(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - result = {} - result["srs"] = [ "string", "string" ] - return result - def diagnostics(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - result = {} - result["diagnostics"] = "string" - return result -class Plugin_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = Plugin_server_dispatcher(self.impl) - def _parse_query(self): - """Query this implementation and return its properties. This is called by xapi to determine whether it is compatible with xapi and to discover the supported features.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='Query this implementation and return its properties. This is called by xapi to determine whether it is compatible with xapi and to discover the supported features.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - return vars(parser.parse_args()) - def _parse_ls(self): - """[ls dbg]: returns a list of attached SRs""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[ls dbg]: returns a list of attached SRs') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - return vars(parser.parse_args()) - def _parse_diagnostics(self): - """Returns a printable set of backend diagnostic information. Implementations are encouraged to include any data which will be useful to diagnose problems. Note this data should not include personally-identifiable data as it is intended to be automatically included in bug reports.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='Returns a printable set of backend diagnostic information. Implementations are encouraged to include any data which will be useful to diagnose problems. 
Note this data should not include personally-identifiable data as it is intended to be automatically included in bug reports.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - return vars(parser.parse_args()) - def query(self): - use_json = False - try: - request = self._parse_query() - use_json = 'json' in request and request['json'] - results = self.dispatcher.query(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def ls(self): - use_json = False - try: - request = self._parse_ls() - use_json = 'json' in request and request['json'] - results = self.dispatcher.ls(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def diagnostics(self): - use_json = False - try: - request = self._parse_diagnostics() - use_json = 'json' in request and request['json'] - results = self.dispatcher.diagnostics(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class plugin_server_dispatcher: - """Demux calls to individual interface server_dispatchers""" - def __init__(self, Plugin=None): - self.Plugin = Plugin - def _dispatch(self, method, params): - try: - logging.debug("method = %s params = %s" % (method, repr(params))) - if method.startswith("Plugin") and self.Plugin: - return self.Plugin._dispatch(method, params) - raise UnknownMethod(method) - except Exception as e: - logging.info("caught %s" % e) - traceback.print_exc() - try: - # A declared (expected) failure will have a .failure() method - logging.debug("returning %s" % (repr(e.failure()))) - return e.failure() - except AttributeError: - # An undeclared (unexpected) failure is wrapped as InternalError - return (InternalError(str(e)).failure()) -class plugin_server_test(plugin_server_dispatcher): - """Create a server which will respond to all calls, returning arbitrary values. 
This is intended as a marshal/unmarshal test.""" - def __init__(self): - plugin_server_dispatcher.__init__(self, Plugin_server_dispatcher(Plugin_test()))
\ No newline at end of file
diff --git a/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py b/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py
index 18ff5363796..e5a0d9b4834 100644
--- a/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py
+++ b/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py
@@ -1 +1 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
diff --git a/ocaml/xapi-storage/python/xapi/storage/api/volume.py b/ocaml/xapi-storage/python/xapi/storage/api/volume.py
deleted file mode 100644
index b89574f9570..00000000000
--- a/ocaml/xapi-storage/python/xapi/storage/api/volume.py
+++ /dev/null
@@ -1,1416 +0,0 @@
-from __future__ import print_function -from xapi import success, Rpc_light_failure, InternalError, UnmarshalException, TypeError, is_long, UnknownMethod -import xapi -import sys -import json -import argparse -import traceback -import logging - -# pylint: disable=invalid-name,redefined-builtin,undefined-variable -# pyright: reportUndefinedVariable=false -if sys.version_info[0] > 2: - long = int - unicode = str - str = bytes - -class Sr_not_attached(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Sr_not_attached", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class SR_does_not_exist(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "SR_does_not_exist", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Volume_does_not_exist(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Volume_does_not_exist", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Unimplemented(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Cancelled(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Cancelled", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Volume_server_dispatcher: - """Operations which operate on volumes (also known as Virtual Disk Images)""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def create(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('name' in args): - raise UnmarshalException('argument missing', 'name', '') - name = args["name"] - if not isinstance(name, str) and not 
isinstance(name, unicode): - raise TypeError("string", repr(name)) - if not('description' in args): - raise UnmarshalException('argument missing', 'description', '') - description = args["description"] - if not isinstance(description, str) and not isinstance(description, unicode): - raise TypeError("string", repr(description)) - if not('size' in args): - raise UnmarshalException('argument missing', 'size', '') - size = args["size"] - if not(is_long(size)): - raise TypeError("int64", repr(size)) - results = self._impl.create(dbg, sr, name, description, size) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], unicode): - raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): - raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_5 in results['uri']: - if not isinstance(tmp_5, str) and not isinstance(tmp_5, unicode): - raise TypeError("string", repr(tmp_5)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_6 in results['keys'].keys(): - if not isinstance(tmp_6, str) and not isinstance(tmp_6, unicode): - raise TypeError("string", repr(tmp_6)) - for tmp_6 in results['keys'].values(): - if not isinstance(tmp_6, str) and not isinstance(tmp_6, unicode): - raise TypeError("string", repr(tmp_6)) - return results - def snapshot(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - results = self._impl.snapshot(dbg, sr, key) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], unicode): - raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): - raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not 
isinstance(results['description'], unicode): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_7 in results['uri']: - if not isinstance(tmp_7, str) and not isinstance(tmp_7, unicode): - raise TypeError("string", repr(tmp_7)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_8 in results['keys'].keys(): - if not isinstance(tmp_8, str) and not isinstance(tmp_8, unicode): - raise TypeError("string", repr(tmp_8)) - for tmp_8 in results['keys'].values(): - if not isinstance(tmp_8, str) and not isinstance(tmp_8, unicode): - raise TypeError("string", repr(tmp_8)) - return results - def clone(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - results = self._impl.clone(dbg, sr, key) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], unicode): - raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): - raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_9 in results['uri']: - if not isinstance(tmp_9, str) and not isinstance(tmp_9, unicode): - raise TypeError("string", repr(tmp_9)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_10 in results['keys'].keys(): - if not isinstance(tmp_10, str) and not isinstance(tmp_10, unicode): - raise TypeError("string", repr(tmp_10)) - for tmp_10 in results['keys'].values(): - if not isinstance(tmp_10, str) and not isinstance(tmp_10, unicode): - raise TypeError("string", repr(tmp_10)) - return results - def 
destroy(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - results = self._impl.destroy(dbg, sr, key) - return results - def set_name(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - if not('new_name' in args): - raise UnmarshalException('argument missing', 'new_name', '') - new_name = args["new_name"] - if not isinstance(new_name, str) and not isinstance(new_name, unicode): - raise TypeError("string", repr(new_name)) - results = self._impl.set_name(dbg, sr, key, new_name) - return results - def set_description(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - if not('new_description' in args): - raise UnmarshalException('argument missing', 'new_description', '') - new_description = args["new_description"] - if not isinstance(new_description, str) and not isinstance(new_description, unicode): - raise TypeError("string", repr(new_description)) - results = self._impl.set_description(dbg, sr, key, new_description) - return results - def set(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise 
TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - if not('k' in args): - raise UnmarshalException('argument missing', 'k', '') - k = args["k"] - if not isinstance(k, str) and not isinstance(k, unicode): - raise TypeError("string", repr(k)) - if not('v' in args): - raise UnmarshalException('argument missing', 'v', '') - v = args["v"] - if not isinstance(v, str) and not isinstance(v, unicode): - raise TypeError("string", repr(v)) - results = self._impl.set(dbg, sr, key, k, v) - return results - def unset(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - if not('k' in args): - raise UnmarshalException('argument missing', 'k', '') - k = args["k"] - if not isinstance(k, str) and not isinstance(k, unicode): - raise TypeError("string", repr(k)) - results = self._impl.unset(dbg, sr, key, k) - return results - def resize(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - if not('new_size' in args): - raise UnmarshalException('argument missing', 'new_size', '') - new_size = args["new_size"] - if not(is_long(new_size)): - raise TypeError("int64", repr(new_size)) - results = self._impl.resize(dbg, sr, key, new_size) - return results - def stat(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and 
not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): - raise TypeError("string", repr(key)) - results = self._impl.stat(dbg, sr, key) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], unicode): - raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): - raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_11 in results['uri']: - if not isinstance(tmp_11, str) and not isinstance(tmp_11, unicode): - raise TypeError("string", repr(tmp_11)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_12 in results['keys'].keys(): - if not isinstance(tmp_12, str) and not isinstance(tmp_12, unicode): - raise TypeError("string", repr(tmp_12)) - for tmp_12 in results['keys'].values(): - if not isinstance(tmp_12, str) and not isinstance(tmp_12, unicode): - raise TypeError("string", repr(tmp_12)) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "Volume.create": - return success(self.create(args)) - elif method == "Volume.snapshot": - return success(self.snapshot(args)) - elif method == "Volume.clone": - return success(self.clone(args)) - elif method == "Volume.destroy": - return success(self.destroy(args)) - elif method == "Volume.set_name": - return success(self.set_name(args)) - elif method == "Volume.set_description": - return success(self.set_description(args)) - elif method == "Volume.set": - return success(self.set(args)) - elif method == "Volume.unset": - return success(self.unset(args)) - elif method == "Volume.resize": - return success(self.resize(args)) - elif method == "Volume.stat": - return success(self.stat(args)) -class Volume_skeleton: - """Operations which operate on volumes (also known as Virtual Disk Images)""" - def __init__(self): - pass - def create(self, dbg, sr, name, description, size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.create") - def snapshot(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.snapshot") - def clone(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.clone") - def destroy(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.destroy") - def 
set_name(self, dbg, sr, key, new_name): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.set_name") - def set_description(self, dbg, sr, key, new_description): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.set_description") - def set(self, dbg, sr, key, k, v): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.set") - def unset(self, dbg, sr, key, k): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.unset") - def resize(self, dbg, sr, key, new_size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.resize") - def stat(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.stat") -class Volume_test: - """Operations which operate on volumes (also known as Virtual Disk Images)""" - def __init__(self): - pass - def create(self, dbg, sr, name, description, size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - return result - def snapshot(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - return result - def clone(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - return result - def destroy(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def set_name(self, dbg, sr, key, new_name): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def set_description(self, dbg, sr, key, new_description): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def set(self, dbg, sr, key, k, v): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def unset(self, dbg, sr, key, k): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def resize(self, dbg, sr, key, new_size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def stat(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - 
return result -class Volume_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = Volume_server_dispatcher(self.impl) - def _parse_create(self): - """[create sr name description size] creates a new volume in [sr] with [name] and [description]. The volume will have size >= [size] i.e. it is always permissible for an implementation to round-up the volume to the nearest convenient block size""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[create sr name description size] creates a new volume in [sr] with [name] and [description]. The volume will have size >= [size] i.e. it is always permissible for an implementation to round-up the volume to the nearest convenient block size') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('name', action='store', help='A human-readable name to associate with the new disk. This name is intended to be short, to be a good summary of the disk.') - parser.add_argument('description', action='store', help='A human-readable description to associate with the new disk. This can be arbitrarily long, up to the general string size limit.') - parser.add_argument('size', action='store', help='A minimum size (in bytes) for the disk. Depending on the characteristics of the implementation this may be rounded up to (for example) the nearest convenient block size. The created disk will not be smaller than this size.') - return vars(parser.parse_args()) - def _parse_snapshot(self): - """[snapshot sr volume] creates a new volume which is a snapshot of [volume] in [sr]. Snapshots should never be written to; they are intended for backup/restore only. Note the name and description are copied but any extra metadata associated by [set] is not copied.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[snapshot sr volume] creates a new volume which is a snapshot of [volume] in [sr]. Snapshots should never be written to; they are intended for backup/restore only. Note the name and description are copied but any extra metadata associated by [set] is not copied.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def _parse_clone(self): - """[clone sr volume] creates a new volume which is a writable clone of [volume] in [sr]. 
Note the name and description are copied but any extra metadata associated by [set] is not copied.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[clone sr volume] creates a new volume which is a writable clone of [volume] in [sr]. Note the name and description are copied but any extra metadata associated by [set] is not copied.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def _parse_destroy(self): - """[destroy sr volume] removes [volume] from [sr]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[destroy sr volume] removes [volume] from [sr]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def _parse_set_name(self): - """[set_name sr volume new_name] changes the name of [volume]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set_name sr volume new_name] changes the name of [volume]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('new_name', action='store', help='New name') - return vars(parser.parse_args()) - def _parse_set_description(self): - """[set_description sr volume new_description] changes the description of [volume]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set_description sr volume new_description] changes the description of [volume]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('new_description', action='store', help='New description') - return 
vars(parser.parse_args()) - def _parse_set(self): - """[set sr volume key value] associates [key] with [value] in the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set sr volume key value] associates [key] with [value] in the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('k', action='store', help='Key') - parser.add_argument('v', action='store', help='Value') - return vars(parser.parse_args()) - def _parse_unset(self): - """[unset sr volume key] removes [key] and any value associated with it from the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[unset sr volume key] removes [key] and any value associated with it from the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('k', action='store', help='Key') - return vars(parser.parse_args()) - def _parse_resize(self): - """[resize sr volume new_size] enlarges [volume] to be at least [new_size].""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[resize sr volume new_size] enlarges [volume] to be at least [new_size].') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('new_size', action='store', help='New disk size') - return vars(parser.parse_args()) - def _parse_stat(self): - """[stat sr volume] returns metadata associated with [volume].""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = 
json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[stat sr volume] returns metadata associated with [volume].') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def create(self): - use_json = False - try: - request = self._parse_create() - use_json = 'json' in request and request['json'] - results = self.dispatcher.create(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def snapshot(self): - use_json = False - try: - request = self._parse_snapshot() - use_json = 'json' in request and request['json'] - results = self.dispatcher.snapshot(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def clone(self): - use_json = False - try: - request = self._parse_clone() - use_json = 'json' in request and request['json'] - results = self.dispatcher.clone(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def destroy(self): - use_json = False - try: - request = self._parse_destroy() - use_json = 'json' in request and request['json'] - results = self.dispatcher.destroy(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_name(self): - use_json = False - try: - request = self._parse_set_name() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_name(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_description(self): - use_json = False - try: - request = self._parse_set_description() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_description(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set(self): - use_json = False - try: - request = self._parse_set() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def unset(self): - use_json = False - try: - request = self._parse_unset() - use_json = 'json' in request and request['json'] - results = self.dispatcher.unset(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def resize(self): - use_json = False - try: - request = self._parse_resize() - use_json = 'json' in request and request['json'] - results = self.dispatcher.resize(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def stat(self): - use_json = False - try: - request = self._parse_stat() - use_json = 'json' in request and 
request['json'] - results = self.dispatcher.stat(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class SR_server_dispatcher: - """Operations which act on Storage Repositories""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def probe(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - results = self._impl.probe(dbg, uri) - if not isinstance(results['srs'], list): - raise TypeError("7 list", repr(results['srs'])) - for tmp_13 in results['srs']: - if not isinstance(tmp_13['sr'], str) and not isinstance(tmp_13['sr'], unicode): - raise TypeError("string", repr(tmp_13['sr'])) - if not isinstance(tmp_13['name'], str) and not isinstance(tmp_13['name'], unicode): - raise TypeError("string", repr(tmp_13['name'])) - if not isinstance(tmp_13['description'], str) and not isinstance(tmp_13['description'], unicode): - raise TypeError("string", repr(tmp_13['description'])) - if not(is_long(tmp_13['free_space'])): - raise TypeError("int64", repr(tmp_13['free_space'])) - if not(is_long(tmp_13['total_space'])): - raise TypeError("int64", repr(tmp_13['total_space'])) - if not isinstance(tmp_13['datasources'], list): - raise TypeError("string list", repr(tmp_13['datasources'])) - for tmp_14 in tmp_13['datasources']: - if not isinstance(tmp_14, str) and not isinstance(tmp_14, unicode): - raise TypeError("string", repr(tmp_14)) - if not isinstance(tmp_13['clustered'], bool): - raise TypeError("bool", repr(tmp_13['clustered'])) - if tmp_13['health'][0] == 'Healthy': - if not isinstance(tmp_13['health'][1], str) and not isinstance(tmp_13['health'][1], unicode): - raise TypeError("string", repr(tmp_13['health'][1])) - elif tmp_13['health'][0] == 'Recovering': - if not isinstance(tmp_13['health'][1], str) and not isinstance(tmp_13['health'][1], unicode): - raise TypeError("string", repr(tmp_13['health'][1])) - if not isinstance(results['uris'], list): - raise TypeError("string list", repr(results['uris'])) - for tmp_15 in results['uris']: - if not isinstance(tmp_15, str) and not isinstance(tmp_15, unicode): - raise TypeError("string", repr(tmp_15)) - return results - def create(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - if not('name' in args): - raise UnmarshalException('argument missing', 'name', '') - name = args["name"] - if not isinstance(name, str) and not 
isinstance(name, unicode): - raise TypeError("string", repr(name)) - if not('description' in args): - raise UnmarshalException('argument missing', 'description', '') - description = args["description"] - if not isinstance(description, str) and not isinstance(description, unicode): - raise TypeError("string", repr(description)) - if not('configuration' in args): - raise UnmarshalException('argument missing', 'configuration', '') - configuration = args["configuration"] - if not isinstance(configuration, dict): - raise TypeError("(string * string) list", repr(configuration)) - for tmp_16 in configuration.keys(): - if not isinstance(tmp_16, str) and not isinstance(tmp_16, unicode): - raise TypeError("string", repr(tmp_16)) - for tmp_16 in configuration.values(): - if not isinstance(tmp_16, str) and not isinstance(tmp_16, unicode): - raise TypeError("string", repr(tmp_16)) - results = self._impl.create(dbg, uri, name, description, configuration) - return results - def attach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): - raise TypeError("string", repr(uri)) - results = self._impl.attach(dbg, uri) - if not isinstance(results, str) and not isinstance(results, unicode): - raise TypeError("string", repr(results)) - return results - def detach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - results = self._impl.detach(dbg, sr) - return results - def destroy(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - results = self._impl.destroy(dbg, sr) - return results - def stat(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - 
sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - results = self._impl.stat(dbg, sr) - if not isinstance(results['sr'], str) and not isinstance(results['sr'], unicode): - raise TypeError("string", repr(results['sr'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): - raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): - raise TypeError("string", repr(results['description'])) - if not(is_long(results['free_space'])): - raise TypeError("int64", repr(results['free_space'])) - if not(is_long(results['total_space'])): - raise TypeError("int64", repr(results['total_space'])) - if not isinstance(results['datasources'], list): - raise TypeError("string list", repr(results['datasources'])) - for tmp_17 in results['datasources']: - if not isinstance(tmp_17, str) and not isinstance(tmp_17, unicode): - raise TypeError("string", repr(tmp_17)) - if not isinstance(results['clustered'], bool): - raise TypeError("bool", repr(results['clustered'])) - if results['health'][0] == 'Healthy': - if not isinstance(results['health'][1], str) and not isinstance(results['health'][1], unicode): - raise TypeError("string", repr(results['health'][1])) - elif results['health'][0] == 'Recovering': - if not isinstance(results['health'][1], str) and not isinstance(results['health'][1], unicode): - raise TypeError("string", repr(results['health'][1])) - return results - def set_name(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('new_name' in args): - raise UnmarshalException('argument missing', 'new_name', '') - new_name = args["new_name"] - if not isinstance(new_name, str) and not isinstance(new_name, unicode): - raise TypeError("string", repr(new_name)) - results = self._impl.set_name(dbg, sr, new_name) - return results - def set_description(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - if not('new_description' in args): - raise UnmarshalException('argument missing', 'new_description', '') - new_description = args["new_description"] - if not isinstance(new_description, str) and not isinstance(new_description, unicode): - raise TypeError("string", repr(new_description)) - results = self._impl.set_description(dbg, sr, new_description) - return results - def ls(self, args): - """type-check inputs, call implementation, type-check outputs and 
return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): - raise TypeError("string", repr(sr)) - results = self._impl.ls(dbg, sr) - if not isinstance(results, list): - raise TypeError("8 list", repr(results)) - for tmp_18 in results: - if not isinstance(tmp_18['key'], str) and not isinstance(tmp_18['key'], unicode): - raise TypeError("string", repr(tmp_18['key'])) - if tmp_18['uuid'] is not None: - if not isinstance(tmp_18['uuid'], str) and not isinstance(tmp_18['uuid'], unicode): - raise TypeError("string", repr(tmp_18['uuid'])) - if not isinstance(tmp_18['name'], str) and not isinstance(tmp_18['name'], unicode): - raise TypeError("string", repr(tmp_18['name'])) - if not isinstance(tmp_18['description'], str) and not isinstance(tmp_18['description'], unicode): - raise TypeError("string", repr(tmp_18['description'])) - if not isinstance(tmp_18['read_write'], bool): - raise TypeError("bool", repr(tmp_18['read_write'])) - if not(is_long(tmp_18['virtual_size'])): - raise TypeError("int64", repr(tmp_18['virtual_size'])) - if not(is_long(tmp_18['physical_utilisation'])): - raise TypeError("int64", repr(tmp_18['physical_utilisation'])) - if not isinstance(tmp_18['uri'], list): - raise TypeError("string list", repr(tmp_18['uri'])) - for tmp_19 in tmp_18['uri']: - if not isinstance(tmp_19, str) and not isinstance(tmp_19, unicode): - raise TypeError("string", repr(tmp_19)) - if not isinstance(tmp_18['keys'], dict): - raise TypeError("(string * string) list", repr(tmp_18['keys'])) - for tmp_20 in tmp_18['keys'].keys(): - if not isinstance(tmp_20, str) and not isinstance(tmp_20, unicode): - raise TypeError("string", repr(tmp_20)) - for tmp_20 in tmp_18['keys'].values(): - if not isinstance(tmp_20, str) and not isinstance(tmp_20, unicode): - raise TypeError("string", repr(tmp_20)) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "SR.probe": - return success(self.probe(args)) - elif method == "SR.create": - return success(self.create(args)) - elif method == "SR.attach": - return success(self.attach(args)) - elif method == "SR.detach": - return success(self.detach(args)) - elif method == "SR.destroy": - return success(self.destroy(args)) - elif method == "SR.stat": - return success(self.stat(args)) - elif method == "SR.set_name": - return success(self.set_name(args)) - elif method == "SR.set_description": - return success(self.set_description(args)) - elif method == "SR.ls": - return success(self.ls(args)) -class SR_skeleton: - """Operations which act on Storage Repositories""" - def __init__(self): - pass - def probe(self, dbg, uri): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.probe") - def create(self, dbg, uri, name, description, configuration): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.create") - def attach(self, dbg, uri): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.attach") - def detach(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise 
Unimplemented("SR.detach") - def destroy(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.destroy") - def stat(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.stat") - def set_name(self, dbg, sr, new_name): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.set_name") - def set_description(self, dbg, sr, new_description): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.set_description") - def ls(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.ls") -class SR_test: - """Operations which act on Storage Repositories""" - def __init__(self): - pass - def probe(self, dbg, uri): - """Operations which act on Storage Repositories""" - result = {} - result["result"] = { "srs": [ { "sr": "string", "name": "string", "description": "string", "free_space": long(0), "total_space": long(0), "datasources": [ "string", "string" ], "clustered": True, "health": None }, { "sr": "string", "name": "string", "description": "string", "free_space": long(0), "total_space": long(0), "datasources": [ "string", "string" ], "clustered": True, "health": None } ], "uris": [ "string", "string" ] } - return result - def create(self, dbg, uri, name, description, configuration): - """Operations which act on Storage Repositories""" - result = {} - return result - def attach(self, dbg, uri): - """Operations which act on Storage Repositories""" - result = {} - result["sr"] = "string" - return result - def detach(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - return result - def destroy(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - return result - def stat(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - result["sr"] = { "sr": "string", "name": "string", "description": "string", "free_space": long(0), "total_space": long(0), "datasources": [ "string", "string" ], "clustered": True, "health": None } - return result - def set_name(self, dbg, sr, new_name): - """Operations which act on Storage Repositories""" - result = {} - return result - def set_description(self, dbg, sr, new_description): - """Operations which act on Storage Repositories""" - result = {} - return result - def ls(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - result["volumes"] = [ { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } }, { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } ] - return result -class SR_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = SR_server_dispatcher(self.impl) - def _parse_probe(self): - """[probe uri]: looks for existing SRs on the storage device""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[probe uri]: looks for existing SRs on the storage device') - 
parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='The Storage Repository URI') - return vars(parser.parse_args()) - def _parse_create(self): - """[create uri name description configuration]: creates a fresh SR""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[create uri name description configuration]: creates a fresh SR') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='The Storage Repository URI') - parser.add_argument('name', action='store', help='Human-readable name for the SR') - parser.add_argument('description', action='store', help='Human-readable description for the SR') - parser.add_argument('--configuration', default={}, nargs=2, action=xapi.ListAction, help='Plugin-specific configuration which describes where and how to create the storage repository. This may include the physical block device name, a remote NFS server and path or an RBD storage pool.') - return vars(parser.parse_args()) - def _parse_attach(self): - """[attach uri]: attaches the SR to the local host. Once an SR is attached then volumes may be manipulated.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[attach uri]: attaches the SR to the local host. Once an SR is attached then volumes may be manipulated.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='The Storage Repository URI') - return vars(parser.parse_args()) - def _parse_detach(self): - """[detach sr]: detaches the SR, clearing up any associated resources. Once the SR is detached then volumes may not be manipulated.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[detach sr]: detaches the SR, clearing up any associated resources. Once the SR is detached then volumes may not be manipulated.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def _parse_destroy(self): - """[destroy sr]: destroys the [sr] and deletes any volumes associated with it. 
Note that an SR must be attached to be destroyed; otherwise Sr_not_attached is thrown.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[destroy sr]: destroys the [sr] and deletes any volumes associated with it. Note that an SR must be attached to be destroyed; otherwise Sr_not_attached is thrown.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def _parse_stat(self): - """[stat sr] returns summary metadata associated with [sr]. Note this call does not return details of sub-volumes, see SR.ls.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[stat sr] returns summary metadata associated with [sr]. Note this call does not return details of sub-volumes, see SR.ls.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def _parse_set_name(self): - """[set_name sr new_name] changes the name of [sr]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set_name sr new_name] changes the name of [sr]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('new_name', action='store', help='The new name of the SR') - return vars(parser.parse_args()) - def _parse_set_description(self): - """[set_description sr new_description] changes the description of [sr]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set_description sr new_description] changes the description of [sr]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('new_description', action='store', help='The new description for the SR') - return vars(parser.parse_args()) - def _parse_ls(self): - """[ls sr] returns a list of volumes contained within an attached SR.""" - # in --json mode we don't have any other arguments - if ('--json' in 
sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[ls sr] returns a list of volumes contained within an attached SR.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def probe(self): - use_json = False - try: - request = self._parse_probe() - use_json = 'json' in request and request['json'] - results = self.dispatcher.probe(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def create(self): - use_json = False - try: - request = self._parse_create() - use_json = 'json' in request and request['json'] - results = self.dispatcher.create(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def attach(self): - use_json = False - try: - request = self._parse_attach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.attach(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def detach(self): - use_json = False - try: - request = self._parse_detach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.detach(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def destroy(self): - use_json = False - try: - request = self._parse_destroy() - use_json = 'json' in request and request['json'] - results = self.dispatcher.destroy(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def stat(self): - use_json = False - try: - request = self._parse_stat() - use_json = 'json' in request and request['json'] - results = self.dispatcher.stat(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_name(self): - use_json = False - try: - request = self._parse_set_name() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_name(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_description(self): - use_json = False - try: - request = self._parse_set_description() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_description(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def ls(self): - use_json = False - try: - request = self._parse_ls() - use_json = 'json' in request and request['json'] - results = self.dispatcher.ls(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class volume_server_dispatcher: - """Demux calls to individual interface server_dispatchers""" - def __init__(self, Volume=None, SR=None): - 
self.Volume = Volume - self.SR = SR - def _dispatch(self, method, params): - try: - logging.debug("method = %s params = %s" % (method, repr(params))) - if method.startswith("Volume") and self.Volume: - return self.Volume._dispatch(method, params) - elif method.startswith("SR") and self.SR: - return self.SR._dispatch(method, params) - raise UnknownMethod(method) - except Exception as e: - logging.info("caught %s" % e) - traceback.print_exc() - try: - # A declared (expected) failure will have a .failure() method - logging.debug("returning %s" % (repr(e.failure()))) - return e.failure() - except AttributeError: - # An undeclared (unexpected) failure is wrapped as InternalError - return (InternalError(str(e)).failure()) -class volume_server_test(volume_server_dispatcher): - """Create a server which will respond to all calls, returning arbitrary values. This is intended as a marshal/unmarshal test.""" - def __init__(self): - volume_server_dispatcher.__init__(self, Volume_server_dispatcher(Volume_test()), SR_server_dispatcher(SR_test())) \ No newline at end of file diff --git a/ocaml/xapi-storage/python/xapi/storage/common.py b/ocaml/xapi-storage/python/xapi/storage/common.py index a311446a416..e8d34869277 100644 --- a/ocaml/xapi-storage/python/xapi/storage/common.py +++ b/ocaml/xapi-storage/python/xapi/storage/common.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 from xapi.storage import log import xapi diff --git a/ocaml/xapi-types/features.ml b/ocaml/xapi-types/features.ml index 6e838f32b83..c80d3c833a5 100644 --- a/ocaml/xapi-types/features.ml +++ b/ocaml/xapi-types/features.ml @@ -65,6 +65,8 @@ type feature = | Internal_repo_access | VTPM | VM_groups + | VM_start + | VM_appliance_start [@@deriving rpc] type orientation = Positive | Negative @@ -134,13 +136,16 @@ let keys_of_features = ) ; (VTPM, ("restrict_vtpm", Negative, "VTPM")) ; (VM_groups, ("restrict_vm_groups", Negative, "VM_groups")) + ; (VM_start, ("restrict_vm_start", Negative, "Start")) + ; (VM_appliance_start, ("restrict_vm_appliance_start", Negative, "Start")) ] (* A list of features that must be considered "enabled" by `of_assoc_list` if the feature string is missing from the list. These are existing features that have been recently restricted, and which we want to remain enabled during a rolling pool upgrade. *) -let enabled_when_unknown = [Xen_motion; AD; Updates] +let enabled_when_unknown = + [Xen_motion; AD; Updates; VM_start; VM_appliance_start] let name_of_feature f = rpc_of_feature f |> Rpc.string_of_rpc diff --git a/ocaml/xapi-types/features.mli b/ocaml/xapi-types/features.mli index bcd1ef4ac66..f6efce3f0a5 100644 --- a/ocaml/xapi-types/features.mli +++ b/ocaml/xapi-types/features.mli @@ -73,6 +73,8 @@ type feature = (** Enable restriction on repository access to pool members only *) | VTPM (** Support VTPM device required by Win11 guests *) | VM_groups (** Enable use of VM groups *) + | VM_start (** Allow starting of VMs (!) 
*) + | VM_appliance_start (** Allow starting of VM appliances *) val feature_of_rpc : Rpc.t -> feature (** Convert RPC into {!feature}s *) diff --git a/ocaml/xapi/certificates.ml b/ocaml/xapi/certificates.ml index fe66194cb0e..6e1c01b7be6 100644 --- a/ocaml/xapi/certificates.ml +++ b/ocaml/xapi/certificates.ml @@ -214,7 +214,7 @@ end = struct in (name, Ref.null, `ca, remove_obsoleted_copies) in - let date_of_ptime time = Date.of_float (Ptime.to_float_s time) in + let date_of_ptime time = Date.of_unix_time (Ptime.to_float_s time) in let dates_of_ptimes (a, b) = (date_of_ptime a, date_of_ptime b) in let not_before, not_after = dates_of_ptimes (X509.Certificate.validity certificate) diff --git a/ocaml/xapi/create_misc.ml b/ocaml/xapi/create_misc.ml index beb94f4751c..d329170fce6 100644 --- a/ocaml/xapi/create_misc.ml +++ b/ocaml/xapi/create_misc.ml @@ -280,7 +280,7 @@ and create_domain_zero_record ~__context ~domain_zero_ref (host_info : host_info ~scheduled_to_be_resident_on:Ref.null ~affinity:localhost ~suspend_VDI:Ref.null ~domid:0L ~domarch ~is_control_domain:true ~is_a_template:false ~is_default_template:false ~is_a_snapshot:false - ~snapshot_time:Date.never ~snapshot_of:Ref.null + ~snapshot_time:Date.epoch ~snapshot_of:Ref.null ~transportable_snapshot_id:"" ~snapshot_info:[] ~snapshot_metadata:"" ~parent:Ref.null ~other_config:[] ~blobs:[] ~xenstore_data:[] ~tags:[] ~user_version:1L ~ha_restart_priority:"" ~ha_always_run:false @@ -348,8 +348,8 @@ and create_domain_zero_metrics_record ~__context ~domain_zero_metrics_ref ~memory_actual:memory_constraints.target ~vCPUs_utilisation:(List.map (fun x -> (Int64.of_int x, 0.)) (mkints vcpus)) ~vCPUs_number:(Int64.of_int vcpus) ~vCPUs_CPU:[] ~vCPUs_params:[] - ~vCPUs_flags:[] ~state:[] ~start_time:Date.never ~install_time:Date.never - ~last_updated:Date.never ~other_config:[] ~hvm:false ~nomigrate:false + ~vCPUs_flags:[] ~state:[] ~start_time:Date.epoch ~install_time:Date.epoch + ~last_updated:Date.epoch ~other_config:[] ~hvm:false ~nomigrate:false ~nested_virt:false ~current_domain_type:Xapi_globs.domain_zero_domain_type and update_domain_zero_record ~__context ~domain_zero_ref (host_info : host_info) diff --git a/ocaml/xapi/db_gc_util.ml b/ocaml/xapi/db_gc_util.ml index 182eaac00df..7972aa28ed9 100644 --- a/ocaml/xapi/db_gc_util.ml +++ b/ocaml/xapi/db_gc_util.ml @@ -308,14 +308,14 @@ let timeout_tasks ~__context = let completed_old, completed_young = List.partition (fun (_, t) -> - Date.to_float t.Db_actions.task_finished < oldest_completed_time + Date.to_unix_time t.Db_actions.task_finished < oldest_completed_time ) completed_gcable in let pending_old, pending_young = List.partition (fun (_, t) -> - Date.to_float t.Db_actions.task_created < oldest_pending_time + Date.to_unix_time t.Db_actions.task_created < oldest_pending_time ) pending in @@ -360,8 +360,8 @@ let timeout_tasks ~__context = List.sort (fun (_, t1) (_, t2) -> compare - (Date.to_float t1.Db_actions.task_finished) - (Date.to_float t2.Db_actions.task_finished) + (Date.to_unix_time t1.Db_actions.task_finished) + (Date.to_unix_time t2.Db_actions.task_finished) ) completed in @@ -422,7 +422,7 @@ let timeout_sessions_common ~__context sessions limit session_group = List.map (fun (x, y) -> ( x - , Date.to_float y.Db_actions.session_last_active + , Date.to_unix_time y.Db_actions.session_last_active , y.Db_actions.session_uuid ) ) @@ -447,7 +447,7 @@ let timeout_sessions_common ~__context sessions limit session_group = debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" 
(Ref.string_of s) uuid (Context.trackid_of_session (Some s)) - (Date.to_string (Date.of_float active)) + (Date.to_rfc3339 (Date.of_unix_time active)) doc ; Xapi_session.destroy_db_session ~__context ~self:s ) @@ -586,7 +586,7 @@ let timeout_alerts ~__context = let all_alerts = Db.Alert.get_all ~__context in let now = Unix.gettimeofday() in List.iter (fun alert -> - let alert_time = Date.to_float (Db.Alert.get_timestamp ~__context ~self:alert) in + let alert_time = Date.to_unix_time (Db.Alert.get_timestamp ~__context ~self:alert) in if now -. alert_time > Xapi_globs.alert_timeout then Db.Alert.destroy ~__context ~self:alert ) all_alerts diff --git a/ocaml/xapi/dbsync.ml b/ocaml/xapi/dbsync.ml index 5ee30bb39c8..875b406bb89 100644 --- a/ocaml/xapi/dbsync.ml +++ b/ocaml/xapi/dbsync.ml @@ -42,7 +42,7 @@ let create_host_metrics ~__context = Db.Host_metrics.create ~__context ~ref:r ~uuid:(Uuidx.to_string (Uuidx.make ())) ~live:false ~memory_total:0L ~memory_free:0L - ~last_updated:Xapi_stdext_date.Date.never ~other_config:[] ; + ~last_updated:Xapi_stdext_date.Date.epoch ~other_config:[] ; Db.Host.set_metrics ~__context ~self ~value:r ) ) diff --git a/ocaml/xapi/dbsync_master.ml b/ocaml/xapi/dbsync_master.ml index 8f8e6a582f8..31f235e7214 100644 --- a/ocaml/xapi/dbsync_master.ml +++ b/ocaml/xapi/dbsync_master.ml @@ -291,11 +291,10 @@ let ensure_vm_metrics_records_exist __context = let uuid = Uuidx.to_string (Uuidx.make ()) in Db.VM_metrics.create ~__context ~ref:m ~uuid ~vCPUs_number:0L ~vCPUs_utilisation:[] ~memory_actual:0L ~vCPUs_CPU:[] ~vCPUs_params:[] - ~vCPUs_flags:[] ~start_time:Xapi_stdext_date.Date.never - ~install_time:Xapi_stdext_date.Date.never ~state:[] - ~last_updated:(Xapi_stdext_date.Date.of_float 0.) - ~other_config:[] ~hvm:false ~nested_virt:false ~nomigrate:false - ~current_domain_type:`unspecified ; + ~vCPUs_flags:[] ~start_time:Xapi_stdext_date.Date.epoch + ~install_time:Xapi_stdext_date.Date.epoch ~state:[] + ~last_updated:Xapi_stdext_date.Date.epoch ~other_config:[] ~hvm:false + ~nested_virt:false ~nomigrate:false ~current_domain_type:`unspecified ; Db.VM.set_metrics ~__context ~self:vm ~value:m ) ) diff --git a/ocaml/xapi/dbsync_slave.ml b/ocaml/xapi/dbsync_slave.ml index 32ee7d44d21..3b90a3a05c3 100644 --- a/ocaml/xapi/dbsync_slave.ml +++ b/ocaml/xapi/dbsync_slave.ml @@ -73,12 +73,12 @@ let get_start_time () = let uptime = String.split ' ' uptime in let uptime = List.hd uptime in let uptime = float_of_string uptime in - let boot_time = Date.of_float (now -. uptime) in - debug " system booted at %s" (Date.to_string boot_time) ; + let boot_time = Date.of_unix_time (now -. uptime) in + debug " system booted at %s" (Date.to_rfc3339 boot_time) ; boot_time with e -> debug "Calculating boot time failed with '%s'" (ExnHelper.string_of_exn e) ; - Date.never + Date.epoch (* not sufficient just to fill in this data on create time [Xen caps may change if VT enabled in BIOS etc.] 
*) @@ -106,7 +106,9 @@ let refresh_localhost_info ~__context info = Db.Host.set_capabilities ~__context ~self:host ~value:caps ; Db.Host.set_address ~__context ~self:host ~value:(get_my_ip_addr ~__context) ; let boot_time_key = "boot_time" in - let boot_time_value = string_of_float (Date.to_float (get_start_time ())) in + let boot_time_value = + string_of_float (Date.to_unix_time (get_start_time ())) + in Db.Host.remove_from_other_config ~__context ~self:host ~key:boot_time_key ; Db.Host.add_to_other_config ~__context ~self:host ~key:boot_time_key ~value:boot_time_value ; diff --git a/ocaml/xapi/debug_populate.ml b/ocaml/xapi/debug_populate.ml index 0eb1a89581a..3c192509915 100644 --- a/ocaml/xapi/debug_populate.ml +++ b/ocaml/xapi/debug_populate.ml @@ -72,7 +72,7 @@ let rec make_vdis_and_vbds __context vmref i = let physical_utilisation = 1L in let metadata_of_pool = Ref.null in let is_a_snapshot = false in - let snapshot_time = Xapi_stdext_date.Date.never in + let snapshot_time = Xapi_stdext_date.Date.epoch in let snapshot_of = Ref.null in let sharable = false in let cbt_enabled = false in diff --git a/ocaml/xapi/export.ml b/ocaml/xapi/export.ml index 24589827bc8..a81ec647225 100644 --- a/ocaml/xapi/export.ml +++ b/ocaml/xapi/export.ml @@ -241,7 +241,7 @@ let make_vm ?(with_snapshot_metadata = false) ~preserve_power_state table ; API.vM_snapshots= (if with_snapshot_metadata then vm.API.vM_snapshots else []) ; API.vM_snapshot_time= - (if with_snapshot_metadata then vm.API.vM_snapshot_time else Date.never) + (if with_snapshot_metadata then vm.API.vM_snapshot_time else Date.epoch) ; API.vM_transportable_snapshot_id= ( if with_snapshot_metadata then vm.API.vM_transportable_snapshot_id diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml index bc9d3e1db0b..7e1a1cb8f12 100644 --- a/ocaml/xapi/import.ml +++ b/ocaml/xapi/import.ml @@ -468,8 +468,8 @@ module VM : HandlerTools = struct ( Api_errors.vm_bad_power_state , [ Ref.string_of vm - ; Record_util.power_state_to_string `Halted - ; Record_util.power_state_to_string power_state + ; Record_util.vm_power_state_to_string `Halted + ; Record_util.vm_power_state_to_string power_state ] ) ) @@ -788,6 +788,7 @@ module GuestMetrics : HandlerTools = struct Db.VM_guest_metrics.create ~__context ~ref:gm ~uuid:(Uuidx.to_string (Uuidx.make ())) ~os_version:gm_record.API.vM_guest_metrics_os_version + ~netbios_name:gm_record.API.vM_guest_metrics_netbios_name ~pV_drivers_version:gm_record.API.vM_guest_metrics_PV_drivers_version ~pV_drivers_up_to_date: gm_record.API.vM_guest_metrics_PV_drivers_up_to_date diff --git a/ocaml/xapi/license_check.ml b/ocaml/xapi/license_check.ml index d34f2bd4526..e6df516f353 100644 --- a/ocaml/xapi/license_check.ml +++ b/ocaml/xapi/license_check.ml @@ -20,7 +20,7 @@ let never, _ = let get_expiry_date ~__context ~host = let license = Db.Host.get_license_params ~__context ~self:host in if List.mem_assoc "expiry" license then - Some (Xapi_stdext_date.Date.of_string (List.assoc "expiry" license)) + Some (Xapi_stdext_date.Date.of_iso8601 (List.assoc "expiry" license)) else None @@ -30,7 +30,7 @@ let check_expiry ~__context ~host = | None -> false (* No expiry date means no expiry :) *) | Some date -> - Unix.time () > Xapi_stdext_date.Date.to_float date + Unix.time () > Xapi_stdext_date.Date.to_unix_time date in if expired then raise (Api_errors.Server_error (Api_errors.license_expired, [])) diff --git a/ocaml/xapi/license_check.mli b/ocaml/xapi/license_check.mli index f970ff878df..610faaf9e0b 100644 --- 
a/ocaml/xapi/license_check.mli +++ b/ocaml/xapi/license_check.mli @@ -20,9 +20,7 @@ val never : float (** The expiry date that is considered to be "never". *) val get_expiry_date : - __context:Context.t - -> host:API.ref_host - -> Xapi_stdext_date.Date.iso8601 option + __context:Context.t -> host:API.ref_host -> Xapi_stdext_date.Date.t option (** Returns (Some date) if the host's license has an expiry date, * otherwise returns None. *) diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index ce6e69ef54e..83d4ff26e24 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -743,6 +743,7 @@ functor let start ~__context ~self ~paused = info "VM_appliance.start: VM_appliance = '%s'" (vm_appliance_uuid ~__context self) ; + Pool_features.assert_enabled ~__context ~f:Features.VM_appliance_start ; with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.start" ~op:`start (fun () -> Local.VM_appliance.start ~__context ~self ~paused @@ -1130,7 +1131,7 @@ functor let set_telemetry_next_collection ~__context ~self ~value = info "%s: pool='%s' value='%s'" __FUNCTION__ (pool_uuid ~__context self) - (Xapi_stdext_date.Date.to_string value) ; + (Xapi_stdext_date.Date.to_rfc3339 value) ; Local.Pool.set_telemetry_next_collection ~__context ~self ~value let reset_telemetry_uuid ~__context ~self = @@ -1854,6 +1855,7 @@ functor let start ~__context ~vm ~start_paused ~force = info "VM.start: VM = '%s'" (vm_uuid ~__context vm) ; + Pool_features.assert_enabled ~__context ~f:Features.VM_start ; Xapi_vm_helpers.assert_no_legacy_hardware ~__context ~vm ; let local_fn = Local.VM.start ~vm ~start_paused ~force in let host = @@ -2914,6 +2916,8 @@ functor info "VM.assert_can_boot_here: VM = '%s'; host = '%s'" (vm_uuid ~__context self) (host_uuid ~__context host) ; + if Db.VM.get_power_state ~__context ~self = `Halted then + Pool_features.assert_enabled ~__context ~f:Features.VM_start ; Local.VM.assert_can_boot_here ~__context ~self ~host let retrieve_wlb_recommendations ~__context ~vm = @@ -3093,6 +3097,23 @@ functor let get_secureboot_readiness ~__context ~self = info "VM.get_secureboot_readiness: self = '%s'" (vm_uuid ~__context self) ; Local.VM.get_secureboot_readiness ~__context ~self + + let set_blocked_operations ~__context ~self ~value = + info "VM.set_blocked_operations: self = '%s'" (vm_uuid ~__context self) ; + Local.VM.set_blocked_operations ~__context ~self ~value ; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self + + let add_to_blocked_operations ~__context ~self ~key ~value = + info "VM.add_to_blocked_operations: self = '%s'" + (vm_uuid ~__context self) ; + Local.VM.add_to_blocked_operations ~__context ~self ~key ~value ; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self + + let remove_from_blocked_operations ~__context ~self ~key = + info "VM.remove_from_blocked_operations: self = '%s'" + (vm_uuid ~__context self) ; + Local.VM.remove_from_blocked_operations ~__context ~self ~key ; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self end module VM_metrics = struct end @@ -4364,7 +4385,7 @@ functor let unplug_common ~__context ~self ~force = let op = `unplug in - let name = "VIF." ^ Record_util.vif_operation_to_string op in + let name = "VIF." 
^ Record_util.vif_operations_to_string op in info "%s: VIF = '%s'" name (vif_uuid ~__context self) ; let local_fn, remote_fn = if force then @@ -6200,7 +6221,7 @@ functor module SDN_controller = struct let introduce ~__context ~protocol ~address ~port = info "SDN_controller.introduce: protocol='%s', address='%s', port='%Ld'" - (Record_util.sdn_protocol_to_string protocol) + (Record_util.sdn_controller_protocol_to_string protocol) address port ; Local.SDN_controller.introduce ~__context ~protocol ~address ~port diff --git a/ocaml/xapi/monitor_master.ml b/ocaml/xapi/monitor_master.ml index bb4e6cf2e5b..ffad86ccd6c 100644 --- a/ocaml/xapi/monitor_master.ml +++ b/ocaml/xapi/monitor_master.ml @@ -70,8 +70,7 @@ let set_pif_metrics ~__context ~self ~vendor ~device ~carrier ~speed ~duplex Db.PIF_metrics.set_duplex ~__context ~self ~value:duplex ; if pmr.API.pIF_metrics_pci_bus_path <> pcibuspath then Db.PIF_metrics.set_pci_bus_path ~__context ~self ~value:pcibuspath ; - Db.PIF_metrics.set_last_updated ~__context ~self - ~value:(Date.of_float (Unix.gettimeofday ())) + Db.PIF_metrics.set_last_updated ~__context ~self ~value:(Date.now ()) (* Note that the following function is actually called on the slave most of the * time now but only when the PIF information changes. *) @@ -190,8 +189,8 @@ let update_pifs ~__context host pifs = ~uuid:(Uuidx.to_string (Uuidx.make ())) ~carrier:false ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" ~speed:0L ~duplex:false ~pci_bus_path:"" - ~io_read_kbs:0. ~io_write_kbs:0. - ~last_updated:(Date.of_float 0.) ~other_config:[] ; + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:Date.epoch + ~other_config:[] ; Db.PIF.set_metrics ~__context ~self:pifdev ~value:ref ; ref in diff --git a/ocaml/xapi/pool_periodic_update_sync.ml b/ocaml/xapi/pool_periodic_update_sync.ml index 5f1e502b2b4..45aacf82a9c 100644 --- a/ocaml/xapi/pool_periodic_update_sync.ml +++ b/ocaml/xapi/pool_periodic_update_sync.ml @@ -144,7 +144,7 @@ let rec update_sync () = with e -> let exc = Printexc.to_string e in warn "Periodic update sync failed with exception %s" exc ; - let now = Xapi_stdext_date.Date.(now () |> to_string) in + let now = Xapi_stdext_date.Date.(now () |> to_rfc3339) in let body = Printf.sprintf "Periodic update sync \ diff --git a/ocaml/xapi/repository.ml b/ocaml/xapi/repository.ml index 95007999782..d798246d0b0 100644 --- a/ocaml/xapi/repository.ml +++ b/ocaml/xapi/repository.ml @@ -298,7 +298,7 @@ let get_applied_livepatches_of_host updates_of_host = let is_livepatchable ~__context repository applied_livepatches_of_host = let updates_info = - parse_updateinfo ~__context ~self:repository ~check:false + parse_updateinfo ~__context ~self:repository ~check:false |> snd in List.exists (fun lp -> @@ -562,7 +562,7 @@ let get_pool_updates_in_json ~__context ~hosts = set_available_updates ~__context |> ignore ; let repository_name = get_repository_name ~__context ~self:repository in - let updates_info = + let api_ver, updates_info = parse_updateinfo ~__context ~self:repository ~check:true in let updates_of_hosts, ids_of_updates = @@ -579,12 +579,17 @@ let get_pool_updates_in_json ~__context ~hosts = |> List.map (fun upd_id -> List.assoc upd_id updates_info) |> List.map (prune_updateinfo_for_livepatches lps) in - `Assoc - [ - ("hosts", `List (List.map HostUpdates.to_json updates_of_hosts)) - ; ("updates", `List (List.map UpdateInfo.to_json updateinfo_list)) - ; ("hash", `String (Db.Repository.get_hash ~__context ~self:repository)) - ] + let f x = + Option.fold ~none:x + ~some:(fun 
api_ver -> ("xapi-api-version", `String api_ver) :: x) + api_ver + in + [ + ("hosts", `List (List.map HostUpdates.to_json updates_of_hosts)) + ; ("updates", `List (List.map UpdateInfo.to_json updateinfo_list)) + ; ("hash", `String (Db.Repository.get_hash ~__context ~self:repository)) + ] + |> fun x -> `Assoc (f x) with | Api_errors.(Server_error (code, _)) as e when code <> Api_errors.internal_error -> @@ -787,7 +792,7 @@ let apply_updates ~__context ~host ~hash = raise Api_errors.(Server_error (updateinfo_hash_mismatch, [])) ; with_pool_repositories (fun () -> let updates_info = - parse_updateinfo ~__context ~self:repository ~check:true + parse_updateinfo ~__context ~self:repository ~check:true |> snd in let updates_of_hosts = if Helpers.is_pool_master ~__context ~host then ( diff --git a/ocaml/xapi/session_check.ml b/ocaml/xapi/session_check.ml index 27812fc5244..d30dbb6d4e3 100644 --- a/ocaml/xapi/session_check.ml +++ b/ocaml/xapi/session_check.ml @@ -53,9 +53,34 @@ let check ~intra_pool_only ~session_id ~action = if (not pool) && not (Pool_role.is_master ()) then raise Non_master_login_on_slave ; if Pool_role.is_master () then - Db_actions.DB_Action.Session.set_last_active ~__context - ~self:session_id - ~value:(Xapi_stdext_date.Date.of_float (Unix.time ())) + (* before updating the last_active field, check if the field has been + already updated recently. This avoids holding the database lock too often.*) + let n = Xapi_stdext_date.Date.now () in + let last_active = + Db_actions.DB_Action.Session.get_last_active ~__context + ~self:session_id + in + let ptime_now = Xapi_stdext_date.Date.to_ptime n in + let refresh_threshold = + let last_active_ptime = + Xapi_stdext_date.Date.to_ptime last_active + in + match + Ptime.add_span last_active_ptime + !Xapi_globs.threshold_last_active + with + | None -> + let err_msg = + "Can't add the configurable threshold of last active to \ + the current time." 
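(* An illustrative sketch, not part of the patch itself, of the throttling
   decision implemented in the session_check.ml hunk above: the coordinator
   only writes last_active when the stored value is older than the
   configured threshold, so the database lock is not taken on every call.
   [now], [last_active] and [threshold] stand in for the Ptime values used
   above; where the hunk raises an internal error on Ptime overflow, this
   sketch simply refreshes. *)
let should_refresh_last_active ~now ~last_active ~threshold =
  match Ptime.add_span last_active threshold with
  | None ->
      true (* overflow: the hunk above raises an internal error instead *)
  | Some refresh_threshold ->
      Ptime.is_later now ~than:refresh_threshold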
+ in + raise Api_errors.(Server_error (internal_error, [err_msg])) + | Some ptime -> + ptime + in + if Ptime.is_later ptime_now ~than:refresh_threshold then + Db_actions.DB_Action.Session.set_last_active ~__context + ~self:session_id ~value:n with | Db_exn.DBCache_NotFound (_, _, reference) -> info diff --git a/ocaml/xapi/storage_access.ml b/ocaml/xapi/storage_access.ml index a307eb48bdd..c92651bc576 100644 --- a/ocaml/xapi/storage_access.ml +++ b/ocaml/xapi/storage_access.ml @@ -31,6 +31,11 @@ let s_of_vdi = Vdi.string_of let s_of_sr = Sr.string_of let transform_storage_exn f = + let get_sr_ref sr_uuid = + Server_helpers.exec_with_new_task "transform_storage_exn" (fun __context -> + Db.SR.get_by_uuid ~__context ~uuid:sr_uuid + ) + in try f () with | Storage_error (Backend_error (code, params)) as e -> Backtrace.reraise e (Api_errors.Server_error (code, params)) @@ -39,17 +44,30 @@ let transform_storage_exn f = let backtrace = Backtrace.Interop.of_json "SM" backtrace in Backtrace.add e backtrace ; Backtrace.reraise e (Api_errors.Server_error (code, params)) + | Storage_error (Sr_unhealthy (sr, health)) as e -> + let advice = + match health with + | Unavailable -> + "try reboot" + | Unreachable -> + "try again later" + | _health -> + "" + in + let sr = get_sr_ref sr in + Backtrace.reraise e + (Api_errors.Server_error + ( Api_errors.sr_unhealthy + , [Ref.string_of sr; Storage_interface.show_sr_health health; advice] + ) + ) | Api_errors.Server_error _ as e -> raise e | Storage_error (No_storage_plugin_for_sr sr) as e -> - Server_helpers.exec_with_new_task "transform_storage_exn" - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - Backtrace.reraise e - (Api_errors.Server_error - (Api_errors.sr_not_attached, [Ref.string_of sr]) - ) - ) + let sr = get_sr_ref sr in + Backtrace.reraise e + (Api_errors.Server_error (Api_errors.sr_not_attached, [Ref.string_of sr]) + ) | e -> Backtrace.reraise e (Api_errors.Server_error diff --git a/ocaml/xapi/storage_mux.ml b/ocaml/xapi/storage_mux.ml index 3a11ad0077f..b14476a3d9d 100644 --- a/ocaml/xapi/storage_mux.ml +++ b/ocaml/xapi/storage_mux.ml @@ -348,6 +348,14 @@ module Mux = struct end)) in C.SR.stat (Debug_info.to_string di) sr + let scan2 () ~dbg ~sr = + with_dbg ~name:"SR.scan2" ~dbg @@ fun di -> + info "SR.scan2 dbg:%s sr:%s" dbg (s_of_sr sr) ; + let module C = StorageAPI (Idl.Exn.GenClient (struct + let rpc = of_sr sr + end)) in + C.SR.scan2 (Debug_info.to_string di) sr + let scan () ~dbg ~sr = with_dbg ~name:"SR.scan" ~dbg @@ fun di -> info "SR.scan dbg:%s sr:%s" dbg (s_of_sr sr) ; diff --git a/ocaml/xapi/storage_smapiv1.ml b/ocaml/xapi/storage_smapiv1.ml index 465b5d354b1..d8bf2cdc203 100644 --- a/ocaml/xapi/storage_smapiv1.ml +++ b/ocaml/xapi/storage_smapiv1.ml @@ -86,7 +86,7 @@ let vdi_info_of_vdi_rec __context vdi_rec = ; ty= Storage_utils.string_of_vdi_type vdi_rec.API.vDI_type ; metadata_of_pool= Ref.string_of vdi_rec.API.vDI_metadata_of_pool ; is_a_snapshot= vdi_rec.API.vDI_is_a_snapshot - ; snapshot_time= Date.to_string vdi_rec.API.vDI_snapshot_time + ; snapshot_time= Date.to_rfc3339 vdi_rec.API.vDI_snapshot_time ; snapshot_of= ( if Db.is_valid_ref __context vdi_rec.API.vDI_snapshot_of then Db.VDI.get_uuid ~__context ~self:vdi_rec.API.vDI_snapshot_of @@ -146,7 +146,7 @@ module SMAPIv1 : Server_impl = struct Server_helpers.exec_with_new_task "VDI.set_snapshot_time" ~subtask_of:(Ref.of_string dbg) (fun __context -> let vdi, _ = find_vdi ~__context sr vdi in - let snapshot_time = Date.of_string snapshot_time in + let 
snapshot_time = Date.of_iso8601 snapshot_time in Db.VDI.set_snapshot_time ~__context ~self:vdi ~value:snapshot_time ) @@ -761,7 +761,7 @@ module SMAPIv1 : Server_impl = struct (Db.VDI.get_other_config ~__context ~self:clonee) with _ -> Uuidx.(to_string (make ())) in - let snapshot_time = Date.of_float (Unix.gettimeofday ()) in + let snapshot_time = Date.now () in Db.VDI.set_name_label ~__context ~self ~value:vdi_info.name_label ; Db.VDI.set_name_description ~__context ~self ~value:vdi_info.name_description ; diff --git a/ocaml/xapi/storage_smapiv1_wrapper.ml b/ocaml/xapi/storage_smapiv1_wrapper.ml index 04d0e99ecf8..7c5a6a97f43 100644 --- a/ocaml/xapi/storage_smapiv1_wrapper.ml +++ b/ocaml/xapi/storage_smapiv1_wrapper.ml @@ -90,7 +90,7 @@ let host_state_path = ref "/var/run/nonpersistent/xapi/storage.db" let indent x = " " ^ x -let string_of_date x = Date.to_string (Date.of_float x) +let string_of_date x = Date.to_rfc3339 (Date.of_unix_time x) let with_dbg ~name ~dbg f = Debug_info.with_dbg ~with_thread:true ~module_name:"SMAPIv1-Wrapper" ~name @@ -1210,6 +1210,20 @@ functor Impl.SR.scan context ~dbg ~sr ) + let scan2 context ~dbg ~sr = + with_dbg ~name:"SR.scan2" ~dbg @@ fun di -> + info "SR.scan2 dbg:%s sr:%s" di.log (s_of_sr sr) ; + let dbg = Debug_info.to_string di in + with_sr sr (fun () -> + match Host.find sr !Host.host with + | None -> + raise (Storage_error (Sr_not_attached (s_of_sr sr))) + | Some _ -> + let vs = Impl.SR.scan context ~dbg ~sr in + let sr_info = Impl.SR.stat context ~dbg ~sr in + (vs, sr_info) + ) + let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = with_dbg ~name:"SR.create" ~dbg @@ fun di -> diff --git a/ocaml/xapi/taskHelper.ml b/ocaml/xapi/taskHelper.ml index 1f1d4fd7744..27e30ce3d39 100644 --- a/ocaml/xapi/taskHelper.ml +++ b/ocaml/xapi/taskHelper.ml @@ -44,7 +44,7 @@ let make ~__context ~http_other_config ?(description = "") ?session_id in let (_ : unit) = Db_actions.DB_Action.Task.create ~ref ~__context ~created:(Date.now ()) - ~finished:(Date.of_float 0.0) ~current_operations:[] ~_type:"" + ~finished:Date.epoch ~current_operations:[] ~_type:"" ~session:(Option.value ~default:Ref.null session_id) ~resident_on:!Xapi_globs.localhost_ref ~status:`pending ~result:"" ~progress:0. 
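The date-handling hunks above and below all apply the same xapi-stdext-date renames; a minimal sketch of the correspondence, assuming the new functions are drop-in replacements for the old ones:

let _date_api_renames () =
  let open Xapi_stdext_date in
  let t = Date.of_unix_time (Unix.gettimeofday ()) in (* was Date.of_float *)
  let s = Date.to_rfc3339 t in (* was Date.to_string *)
  let t' = Date.of_iso8601 s in (* was Date.of_string *)
  let (_ : float) = Date.to_unix_time t' in (* was Date.to_float *)
  (Date.now (), Date.epoch) (* Date.epoch was Date.never *)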
~error_info:[] ~allowed_operations:[] diff --git a/ocaml/xapi/updateinfo.ml b/ocaml/xapi/updateinfo.ml index 092af683232..88e4a0cf0fc 100644 --- a/ocaml/xapi/updateinfo.ml +++ b/ocaml/xapi/updateinfo.ml @@ -531,8 +531,10 @@ end module UpdateInfo = struct (** The [guidance] deprecates [rec_guidance], [abs_guidance] and [livepatch_guidance] *) + type id_t = string + type t = { - id: string + id: id_t ; summary: string ; description: string ; guidance: GuidanceInUpdateInfo.t @@ -546,6 +548,8 @@ module UpdateInfo = struct ; title: string } + type api_ver_t = string + let guidance_to_string o = Option.value (Option.map Guidance.to_string o) ~default:"" @@ -558,7 +562,7 @@ module UpdateInfo = struct ; ("special-info", `String ui.spec_info) ; ("URL", `String ui.url) ; ("type", `String ui.update_type) - ; ("issued", `String (Xapi_stdext_date.Date.to_string ui.issued)) + ; ("issued", `String (Xapi_stdext_date.Date.to_rfc3339 ui.issued)) ; ("severity", `String (Severity.to_string ui.severity)) ; ( "livepatches" , `List (List.map (fun x -> LivePatch.to_json x) ui.livepatches) @@ -605,94 +609,106 @@ module UpdateInfo = struct Option.value (List.assoc_opt kind updateinfo.guidance) ~default:[] let of_xml = function - | Xml.Element ("updates", _, children) -> - List.filter_map - (fun n -> - match n with - | Xml.Element ("update", attr, update_nodes) -> - let ty = - match List.assoc_opt "type" attr with - | Some ty -> - ty - | None -> - "" - in - let ui = - List.fold_left - (fun acc node -> - match node with - | Xml.Element ("id", _, [Xml.PCData v]) -> - {acc with id= v} - | Xml.Element ("url", _, [Xml.PCData v]) -> - {acc with url= v} - | Xml.Element ("special_info", _, [Xml.PCData v]) -> - {acc with spec_info= v} - | Xml.Element ("summary", _, [Xml.PCData v]) -> - {acc with summary= v} - | Xml.Element ("description", _, [Xml.PCData v]) -> - {acc with description= v} - | Xml.Element ("guidance", _, guidance_blocks) -> - { - acc with - guidance= - GuidanceInUpdateInfo.of_xml guidance_blocks - } - | Xml.Element ("guidance_applicabilities", _, apps) -> - { - acc with - guidance_applicabilities= - List.filter_map Applicability.of_xml apps - } - | Xml.Element ("livepatches", _, livepatches) -> - {acc with livepatches= LivePatch.of_xml livepatches} - | Xml.Element ("issued", attr, _) -> - let issued = - match List.assoc_opt "date" attr with - | Some date -> ( - try - Xapi_stdext_date.Date.of_string - (Scanf.sscanf date - "%04d-%02d-%02d %02d:%02d:%02d" - (fun y mon d h m s -> - Printf.sprintf - "%04i%02i%02iT%02i:%02i:%02iZ" y mon d - h m s - ) - ) - with e -> - (* The error should not block update. Ingore it - and set "issued" as epoch. *) - warn "%s" (ExnHelper.string_of_exn e) ; - Xapi_stdext_date.Date.epoch - ) - | None -> - Xapi_stdext_date.Date.epoch - in - {acc with issued} - | Xml.Element ("severity", _, [Xml.PCData v]) -> ( - try {acc with severity= Severity.of_string v} - with e -> - (* The error should not block update. Ingore it. 
*) - warn "%s" (ExnHelper.string_of_exn e) ; - acc + | Xml.Element ("updates", attrs, children) -> ( + let api_ver = List.assoc_opt "xapi-api-version" attrs in + let uis = + List.filter_map + (fun n -> + match n with + | Xml.Element ("update", attrs, update_nodes) -> + let ty = + match List.assoc_opt "type" attrs with + | Some ty -> + ty + | None -> + "" + in + let ui = + List.fold_left + (fun acc node -> + match node with + | Xml.Element ("id", _, [Xml.PCData v]) -> + {acc with id= v} + | Xml.Element ("url", _, [Xml.PCData v]) -> + {acc with url= v} + | Xml.Element ("special_info", _, [Xml.PCData v]) -> + {acc with spec_info= v} + | Xml.Element ("summary", _, [Xml.PCData v]) -> + {acc with summary= v} + | Xml.Element ("description", _, [Xml.PCData v]) -> + {acc with description= v} + | Xml.Element ("guidance", _, guidance_blocks) -> + { + acc with + guidance= + GuidanceInUpdateInfo.of_xml guidance_blocks + } + | Xml.Element ("guidance_applicabilities", _, apps) -> + { + acc with + guidance_applicabilities= + List.filter_map Applicability.of_xml apps + } + | Xml.Element ("livepatches", _, livepatches) -> + {acc with livepatches= LivePatch.of_xml livepatches} + | Xml.Element ("issued", attrs, _) -> + let issued = + match List.assoc_opt "date" attrs with + | Some date -> ( + try + Xapi_stdext_date.Date.of_iso8601 + (Scanf.sscanf date + "%04d-%02d-%02d %02d:%02d:%02d" + (fun y mon d h m s -> + Printf.sprintf + "%04i%02i%02iT%02i:%02i:%02iZ" y mon + d h m s + ) + ) + with e -> + (* The error should not block update. Ignore it + and set "issued" as epoch. *) + warn "%s" (ExnHelper.string_of_exn e) ; + Xapi_stdext_date.Date.epoch + ) + | None -> + Xapi_stdext_date.Date.epoch + in + {acc with issued} + | Xml.Element ("severity", _, [Xml.PCData v]) -> ( + try {acc with severity= Severity.of_string v} + with e -> + (* The error should not block update. Ignore it. *) + warn "%s" (ExnHelper.string_of_exn e) ; + acc + ) + | Xml.Element ("title", _, [Xml.PCData v]) -> + {acc with title= v} + | _ -> + acc ) - {default with update_type= ty} - update_nodes - |> assert_valid_updateinfo - in - debug "updateinfo: %s" (to_string ui) ; - Some ui - | _ -> - None - ) - children - |> assert_no_dup_update_id - |> List.map (fun updateinfo -> (updateinfo.id, updateinfo)) + {default with update_type= ty} + update_nodes + |> assert_valid_updateinfo + in + debug "updateinfo: %s" (to_string ui) ; + Some ui + | _ -> + None + ) + children + |> assert_no_dup_update_id + |> List.map (fun updateinfo -> (updateinfo.id, updateinfo)) + in + match (api_ver, uis) with + | Some v, [] -> + error + "Unexpected xapi-api-version: %s when there are no updates at all" + v ; + raise Api_errors.(Server_error (invalid_updateinfo_xml, [])) + | _, _ -> + (api_ver, uis) + ) | _ -> error "Failed to parse updateinfo.xml: missing <updates>" ; raise Api_errors.(Server_error (invalid_updateinfo_xml, [])) diff --git a/ocaml/xapi/updateinfo.mli b/ocaml/xapi/updateinfo.mli index 7a348db598c..8948d778d23 100644 --- a/ocaml/xapi/updateinfo.mli +++ b/ocaml/xapi/updateinfo.mli @@ -143,8 +143,10 @@ end (** The metadata of one update in updateinfo. 
*) module UpdateInfo : sig + type id_t = string + type t = { - id: string + id: id_t ; summary: string ; description: string ; guidance: GuidanceInUpdateInfo.t @@ -158,13 +160,15 @@ module UpdateInfo : sig ; title: string } + type api_ver_t = string + val to_json : t -> Yojson.Basic.t val guidance_to_string : Guidance.t option -> string - val of_xml : Xml.xml -> (string * t) list + val of_xml : Xml.xml -> api_ver_t option * (id_t * t) list - val of_xml_file : string -> (string * t) list + val of_xml_file : string -> api_ver_t option * (id_t * t) list val get_guidances_of_kind : kind:Guidance.kind -> t -> Guidance.t list end diff --git a/ocaml/xapi/vpx.ml b/ocaml/xapi/vpx.ml index a1cd5924987..2871ad6486f 100644 --- a/ocaml/xapi/vpx.ml +++ b/ocaml/xapi/vpx.ml @@ -27,7 +27,7 @@ type jobInfo = { ; importInfo: importInfo } -type dateTime = Xapi_stdext_date.Date.iso8601 +type dateTime = Xapi_stdext_date.Date.t type jobInstance = { id: string @@ -221,12 +221,12 @@ let serverInfo_of_rpc r = } } -let rpc_of_dateTime v = Rpc.DateTime (Xapi_stdext_date.Date.to_string v) +let rpc_of_dateTime v = Rpc.DateTime (Xapi_stdext_date.Date.to_rfc3339 v) let dateTime_of_rpc r = match r with | Rpc.DateTime v -> - Xapi_stdext_date.Date.of_string v + Xapi_stdext_date.Date.of_iso8601 v | x -> rpc_type_error x "DateTime" "DateTime(datetime)" diff --git a/ocaml/xapi/xapi_blob.ml b/ocaml/xapi/xapi_blob.ml index 6be9cdd9abd..f483f8d7835 100644 --- a/ocaml/xapi/xapi_blob.ml +++ b/ocaml/xapi/xapi_blob.ml @@ -24,7 +24,7 @@ let create ~__context ~mime_type ~public = if mime_type = "" then "application/octet-stream" else mime_type in Db.Blob.create ~__context ~ref ~uuid:(Uuidx.to_string uuid) ~public - ~mime_type:mime_type' ~size:0L ~last_updated:Xapi_stdext_date.Date.never + ~mime_type:mime_type' ~size:0L ~last_updated:Xapi_stdext_date.Date.epoch ~name_label:"" ~name_description:"" ; ref @@ -212,7 +212,7 @@ let handler (req : Http.Request.t) s _ = in Db.Blob.set_size ~__context ~self ~value:size ; Db.Blob.set_last_updated ~__context ~self - ~value:(Xapi_stdext_date.Date.of_float (Unix.gettimeofday ())) + ~value:(Xapi_stdext_date.Date.of_unix_time (Unix.gettimeofday ())) | _ -> failwith "Unsupported method for BLOB" in diff --git a/ocaml/xapi/xapi_cluster.ml b/ocaml/xapi/xapi_cluster.ml index cfa55fde2c7..355bf175527 100644 --- a/ocaml/xapi/xapi_cluster.ml +++ b/ocaml/xapi/xapi_cluster.ml @@ -126,6 +126,7 @@ let create ~__context ~pIF ~cluster_stack ~pool_auto_join ~token_timeout | Error error -> D.warn "Error occurred during Cluster.create. 
Shutting down cluster daemon" ; + Xapi_clustering.Watcher.signal_exit () ; Xapi_clustering.Daemon.disable ~__context ; handle_error error ) @@ -156,6 +157,7 @@ let destroy ~__context ~self = Db.Cluster.destroy ~__context ~self ; D.debug "Cluster destroyed successfully" ; set_ha_cluster_stack ~__context ; + Xapi_clustering.Watcher.signal_exit () ; Xapi_clustering.Daemon.disable ~__context (* Get pool master's cluster_host, return network of PIF *) diff --git a/ocaml/xapi/xapi_cluster_host.ml b/ocaml/xapi/xapi_cluster_host.ml index c55d789b8d9..9644ca8cd78 100644 --- a/ocaml/xapi/xapi_cluster_host.ml +++ b/ocaml/xapi/xapi_cluster_host.ml @@ -261,6 +261,7 @@ let destroy_op ~__context ~self ~force = ) ; Db.Cluster_host.destroy ~__context ~self ; debug "Cluster_host.%s was successful" fn_str ; + Xapi_clustering.Watcher.signal_exit () ; Xapi_clustering.Daemon.disable ~__context | Error error -> warn "Error occurred during Cluster_host.%s" fn_str ; @@ -361,7 +362,7 @@ let enable ~__context ~self = in (* TODO: Pass these through from CLI *) - if not !Xapi_clustering.Daemon.enabled then ( + if not (Xapi_clustering.Daemon.is_enabled ()) then ( D.debug "Cluster_host.enable: xapi-clusterd not running - attempting to start" ; Xapi_clustering.Daemon.enable ~__context diff --git a/ocaml/xapi/xapi_clustering.ml b/ocaml/xapi/xapi_clustering.ml index 249efa74da1..9f21b4c43c4 100644 --- a/ocaml/xapi/xapi_clustering.ml +++ b/ocaml/xapi/xapi_clustering.ml @@ -250,7 +250,9 @@ let assert_cluster_host_has_no_attached_sr_which_requires_cluster_stack raise Api_errors.(Server_error (cluster_stack_in_use, [cluster_stack])) module Daemon = struct - let enabled = ref false + let enabled = Atomic.make false + + let is_enabled () = Atomic.get enabled let maybe_call_script ~__context script params = match Context.get_test_clusterd_rpc __context with @@ -283,13 +285,13 @@ module Daemon = struct (internal_error, [Printf.sprintf "could not start %s" service]) ) ) ; - enabled := true ; + Atomic.set enabled true ; debug "Cluster daemon: enabled & started" let disable ~__context = let port = string_of_int !Xapi_globs.xapi_clusterd_port in debug "Disabling and stopping the clustering daemon" ; - enabled := false ; + Atomic.set enabled false ; maybe_call_script ~__context !Xapi_globs.systemctl ["disable"; service] ; maybe_call_script ~__context !Xapi_globs.systemctl ["stop"; service] ; maybe_call_script ~__context @@ -309,7 +311,7 @@ end * Instead of returning an empty URL which wouldn't work just raise an * exception. *) let rpc ~__context = - if not !Daemon.enabled then + if not (Daemon.is_enabled ()) then raise Api_errors.( Server_error @@ -427,6 +429,8 @@ let compute_corosync_max_host_failures ~__context = corosync_ha_max_hosts module Watcher = struct + module Delay = Xapi_stdext_threads.Threadext.Delay + let routine_updates = "routine updates" let on_corosync_update ~__context ~cluster updates = @@ -552,14 +556,40 @@ module Watcher = struct from corosync represents a consistent snapshot of the current cluster state. *) let stabilising_period = Mtime.Span.(5 * s) + (* The delay on which the watcher will wait. *) + let delay = Delay.make () + + let finish_watch = Atomic.make false + let cluster_stack_watcher : bool Atomic.t = Atomic.make false + (* This function exists to store the fact that the watcher should be destroyed, + to avoid the race where the cluster is destroyed while the watcher is + still waiting/stabilising. + + There are two cases in which this function shall be called: 1. when the clustering + is to be disabled; 2. 
when this host is no longer the coordinator. For the second + case it is only necessary to do this when there is a manual designation of a new + master since in the case of HA the old coordinator would have died, and so would + this thread on the old coordinator. *) + let signal_exit () = + D.debug "%s: Signaled to exit cluster watcher" __FUNCTION__ ; + Delay.signal delay ; + (* set the cluster change watcher back to false as soon as we are signalled + to prevent any race conditions *) + Atomic.set cluster_change_watcher false ; + D.debug + "%s: cluster change watcher is exiting, reset cluster_change_watcher back \ + to false" + __FUNCTION__ ; + Atomic.set finish_watch true + (* we handle unclean hosts join and leave in the watcher, i.e. hosts joining and leaving due to network problems, power cut, etc. Join and leave initiated by the API will be handled in the API call themselves, but they share the same code as the watcher. *) let watch_cluster_change ~__context ~host = - while !Daemon.enabled do + while not (Atomic.get finish_watch) do let m = Cluster_client.LocalClient.UPDATES.get (rpc ~__context) "cluster change watcher call" @@ -569,9 +599,13 @@ module Watcher = struct match find_cluster_host ~__context ~host with | Some ch -> let cluster = Db.Cluster_host.get_cluster ~__context ~self:ch in - if wait then - Thread.delay (Clock.Timer.span_to_s stabilising_period) ; - on_corosync_update ~__context ~cluster updates + if not wait then + on_corosync_update ~__context ~cluster updates + else if + wait + && Clock.Timer.span_to_s stabilising_period |> Delay.wait delay + then + on_corosync_update ~__context ~cluster updates | None -> () in @@ -591,55 +625,60 @@ module Watcher = struct | exception exn -> warn "%s: Got exception %s while query cluster host updates, retrying" __FUNCTION__ (Printexc.to_string exn) ; - Thread.delay (Clock.Timer.span_to_s cluster_change_interval) - done ; - Atomic.set cluster_change_watcher false + let _ : bool = + Clock.Timer.span_to_s cluster_change_interval |> Delay.wait delay + in + () + done let watch_cluster_stack_version ~__context ~host = - if !Daemon.enabled then - match find_cluster_host ~__context ~host with - | Some ch -> - let cluster_ref = Db.Cluster_host.get_cluster ~__context ~self:ch in - let cluster_rec = - Db.Cluster.get_record ~__context ~self:cluster_ref - in - if - Cluster_stack.of_version - ( cluster_rec.API.cluster_cluster_stack - , cluster_rec.API.cluster_cluster_stack_version - ) - = Cluster_stack.Corosync2 - then ( - debug "%s: Detected Corosync 2 running as cluster stack" - __FUNCTION__ ; - let body = - "The current cluster stack version of Corosync 2 is out of date, \ - consider updating to Corosync 3" - in - let name, priority = Api_messages.cluster_stack_out_of_date in - let host_uuid = Db.Host.get_uuid ~__context ~self:host in - - Helpers.call_api_functions ~__context (fun rpc session_id -> - let _ : [> `message] Ref.t = - Client.Client.Message.create ~rpc ~session_id ~name ~priority - ~cls:`Host ~obj_uuid:host_uuid ~body - in - () + match find_cluster_host ~__context ~host with + | Some ch -> + let cluster_ref = Db.Cluster_host.get_cluster ~__context ~self:ch in + let cluster_rec = Db.Cluster.get_record ~__context ~self:cluster_ref in + if + Cluster_stack.of_version + ( cluster_rec.API.cluster_cluster_stack + , cluster_rec.API.cluster_cluster_stack_version ) + = Cluster_stack.Corosync2 + then ( + debug "%s: Detected Corosync 2 running as cluster stack" __FUNCTION__ ; + let body = + "The current cluster stack version of Corosync 2 is 
out of date, \ + consider updating to Corosync 3" + in + let name, priority = Api_messages.cluster_stack_out_of_date in + let host_uuid = Db.Host.get_uuid ~__context ~self:host in + + Helpers.call_api_functions ~__context (fun rpc session_id -> + let _ : [> `message] Ref.t = + Client.Client.Message.create ~rpc ~session_id ~name ~priority + ~cls:`Host ~obj_uuid:host_uuid ~body + in + () ) - | None -> - debug "%s: No cluster host, no need to watch" __FUNCTION__ + ) else + debug + "%s: Detected Corosync 3 as cluster stack, not generating a \ + warning message" + __FUNCTION__ + | None -> + debug "%s: No cluster host, no need to watch" __FUNCTION__ (** [create_as_necessary] will create cluster watchers on the coordinator if they are not already created. There is no need to destroy them: once the clustering daemon is disabled, these threads will exit as well. *) let create_as_necessary ~__context ~host = - if Helpers.is_pool_master ~__context ~host then ( + let is_master = Helpers.is_pool_master ~__context ~host in + let daemon_enabled = Daemon.is_enabled () in + if is_master && daemon_enabled then ( if Xapi_cluster_helpers.cluster_health_enabled ~__context then if Atomic.compare_and_set cluster_change_watcher false true then ( debug "%s: create watcher for corosync-notifyd on coordinator" __FUNCTION__ ; + Atomic.set finish_watch false ; let _ : Thread.t = Thread.create (fun () -> watch_cluster_change ~__context ~host) () in @@ -666,5 +705,9 @@ module Watcher = struct ) else debug "%s: not create watcher for cluster stack as it already exists" __FUNCTION__ - ) + ) else + debug + "%s: not creating watcher because is_master is %b and the daemon is \ + enabled: %b" + __FUNCTION__ is_master daemon_enabled end diff --git a/ocaml/xapi/xapi_clustering.mli b/ocaml/xapi/xapi_clustering.mli new file mode 100644 index 00000000000..7fceae58118 --- /dev/null +++ b/ocaml/xapi/xapi_clustering.mli @@ -0,0 +1,91 @@ +(* Copyright (C) Cloud Software Group Inc. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+*) + +val set_ha_cluster_stack : __context:Context.t -> unit + +val with_clustering_lock : string -> (unit -> 'a) -> 'a + +val pif_of_host : + __context:Context.t -> API.ref_network -> API.ref_host -> 'a Ref.t * API.pIF_t + +val ip_of_pif : 'a Ref.t * API.pIF_t -> Cluster_interface.address + +val assert_pif_prerequisites : 'a Ref.t * API.pIF_t -> unit + +val assert_pif_attached_to : + __context:Context.t -> host:[`host] Ref.t -> pIF:[`PIF] Ref.t -> unit + +val handle_error : Cluster_interface.error -> 'a + +val assert_cluster_host_can_be_created : + __context:Context.t -> host:'a Ref.t -> unit + +val get_required_cluster_stacks : + __context:Context.t -> sr_sm_type:string -> string list + +val assert_cluster_stack_valid : cluster_stack:string -> unit + +val with_clustering_lock_if_needed : + __context:Context.t -> sr_sm_type:string -> string -> (unit -> 'a) -> 'a + +val with_clustering_lock_if_cluster_exists : + __context:Context.t -> string -> (unit -> 'a) -> 'a + +val find_cluster_host : + __context:Context.t -> host:[`host] Ref.t -> 'a Ref.t option + +val get_network_internal : + __context:Context.t -> self:[`Cluster] Ref.t -> [`network] Ref.t + +val assert_cluster_host_enabled : + __context:Context.t -> self:[`Cluster_host] Ref.t -> expected:bool -> unit + +val assert_operation_host_target_is_localhost : + __context:Context.t -> host:[`host] Ref.t -> unit + +val assert_cluster_host_has_no_attached_sr_which_requires_cluster_stack : + __context:Context.t -> self:[`Cluster_host] Ref.t -> unit + +module Daemon : sig + val is_enabled : unit -> bool + + val enable : __context:Context.t -> unit + + val disable : __context:Context.t -> unit + + val restart : __context:Context.t -> unit +end + +val rpc : __context:Context.t -> Rpc.call -> Rpc.response Idl.IdM.t + +val maybe_switch_cluster_stack_version : + __context:Context.t + -> self:'a Ref.t + -> cluster_stack:Cluster_interface.Cluster_stack.t + -> unit + +val assert_cluster_host_is_enabled_for_matching_sms : + __context:Context.t -> host:[`host] Ref.t -> sr_sm_type:string -> unit + +val is_clustering_disabled_on_host : + __context:Context.t -> [`host] Ref.t -> bool + +val compute_corosync_max_host_failures : __context:Context.t -> int + +module Watcher : sig + val on_corosync_update : + __context:Context.t -> cluster:[`Cluster] Ref.t -> string list -> unit + + val signal_exit : unit -> unit + + val create_as_necessary : __context:Context.t -> host:[`host] Ref.t -> unit +end diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 37c62e04e9f..cbaa7430e88 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -712,6 +712,10 @@ let host_assumed_dead_interval = ref Mtime.Span.(10 * min) (* If a session has a last_active older than this we delete it *) let inactive_session_timeout = ref 86400. (* 24 hrs in seconds *) +(* If a session was refreshed more recently than threshold_last_active do not refresh it again. *) +let threshold_last_active = ref (Ptime.Span.of_int_s 600) +(* 10 min in seconds *) + let pending_task_timeout = ref 86400. (* 24 hrs in seconds *) let completed_task_timeout = ref 3900. (* 65 mins *) @@ -1015,7 +1019,7 @@ let trace_log_dir = ref "/var/log/dt/zipkinv2/json" let export_interval = ref 30. 
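Given the new xapi_clustering.mli above, a hypothetical caller tears clustering down by signalling the watcher before disabling the daemon, mirroring the ordering used at the Xapi_cluster.destroy and Xapi_cluster_host.destroy_op call sites earlier in this patch:

(* Hypothetical teardown helper, for illustration only: signal the watcher
   first so it cannot race with the daemon shutdown, then stop the daemon. *)
let shutdown_clustering ~__context =
  if Xapi_clustering.Daemon.is_enabled () then (
    Xapi_clustering.Watcher.signal_exit () ;
    Xapi_clustering.Daemon.disable ~__context
  )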
-let max_spans = ref 1000
+let max_spans = ref 10000
 
 let max_traces = ref 10000
 
@@ -1631,6 +1635,11 @@ let other_options =
       , (fun () -> string_of_int !external_authentication_cache_size)
       , "Specify the maximum capacity of the external authentication cache"
       )
+    ; ( "threshold_last_active"
+      , Arg.Int (fun t -> threshold_last_active := Ptime.Span.of_int_s t)
+      , (fun () -> Format.asprintf "%a" Ptime.Span.pp !threshold_last_active)
+      , "Specify the threshold in seconds below which a session is not refreshed"
+      )
   ]
 
 (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi.
diff --git a/ocaml/xapi/xapi_guest_agent.ml b/ocaml/xapi/xapi_guest_agent.ml
index ffe5b8ae618..bd13e808ec8 100644
--- a/ocaml/xapi/xapi_guest_agent.ml
+++ b/ocaml/xapi/xapi_guest_agent.ml
@@ -68,6 +68,10 @@ let os_version =
   ; ("attr/os/spminor", "spminor") (* windows *)
   ]
 
+let netbios_name = [("data/host_name_dns", "host_name")]
+
+let dns_domain = [("data/domain", "dns_domain")]
+
 let memory = [("data/meminfo_free", "free"); ("data/meminfo_total", "total")]
 
 let device_id = [("data/device_id", "device_id")]
@@ -215,6 +219,7 @@ type m = (string * string) list
 type guest_metrics_t = {
     pv_drivers_version: m
   ; os_version: m
+  ; netbios_name: m
   ; networks: m
   ; other: m
   ; memory: m
@@ -269,6 +274,14 @@ let get_initial_guest_metrics (lookup : string -> string option)
   in
   let pv_drivers_version = to_map pv_drivers_version
   and os_version = to_map os_version
+  and netbios_name =
+    match to_map dns_domain with
+    | [] ->
+        to_map netbios_name
+    | (_, dns_domain) :: _ ->
+        List.map
+          (fun (k, v) -> (k, Printf.sprintf "%s.%s" v dns_domain))
+          (to_map netbios_name)
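+    (* Illustrative example (hypothetical values): if the guest reports
+       data/host_name_dns = "vm01" and data/domain = "example.com", then
+       netbios_name is [("host_name", "vm01.example.com")]; with no
+       data/domain key it stays [("host_name", "vm01")]. *)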
   and device_id = to_map device_id
   and networks =
     to_map
@@ -294,6 +307,7 @@ let get_initial_guest_metrics (lookup : string -> string option)
   {
     pv_drivers_version
   ; os_version
+  ; netbios_name
   ; networks
   ; other
   ; memory
@@ -311,11 +325,11 @@ let create_and_set_guest_metrics (lookup : string -> string option)
   let new_gm_uuid = Uuidx.to_string (Uuidx.make ())
   and new_gm_ref = Ref.make () in
   Db.VM_guest_metrics.create ~__context ~ref:new_gm_ref ~uuid:new_gm_uuid
-    ~os_version:initial_gm.os_version
+    ~os_version:initial_gm.os_version ~netbios_name:initial_gm.netbios_name
     ~pV_drivers_version:initial_gm.pv_drivers_version
     ~pV_drivers_up_to_date:pV_drivers_detected ~memory:[] ~disks:[]
     ~networks:initial_gm.networks ~pV_drivers_detected ~other:initial_gm.other
-    ~last_updated:(Date.of_float initial_gm.last_updated)
+    ~last_updated:(Date.of_unix_time initial_gm.last_updated)
     ~other_config:[] ~live:true
     ~can_use_hotplug_vbd:initial_gm.can_use_hotplug_vbd
     ~can_use_hotplug_vif:initial_gm.can_use_hotplug_vif ;
@@ -339,6 +353,7 @@ let all (lookup : string -> string option) (list : string -> string list)
   let {
       pv_drivers_version
     ; os_version
+    ; netbios_name
     ; networks
     ; other
     ; memory
@@ -372,6 +387,7 @@ let all (lookup : string -> string option) (list : string -> string list)
     {
       pv_drivers_version= []
     ; os_version= []
+    ; netbios_name= []
     ; networks= []
     ; other= []
     ; memory= []
@@ -388,6 +404,7 @@ let all (lookup : string -> string option) (list : string -> string list)
     {
       pv_drivers_version
     ; os_version
+    ; netbios_name
     ; networks
     ; other
     ; memory
@@ -401,6 +418,7 @@ let all (lookup : string -> string option) (list : string -> string list)
   if
     (guest_metrics_cached.pv_drivers_version <> pv_drivers_version
    || guest_metrics_cached.os_version <> os_version
+   || guest_metrics_cached.netbios_name <> netbios_name
    || guest_metrics_cached.networks <> networks
    || guest_metrics_cached.other <> other
    || guest_metrics_cached.device_id <> device_id
@@ -431,6 +449,9 @@ let all (lookup : string -> string option) (list : string -> string list)
         ~value:pv_drivers_version ;
     if guest_metrics_cached.os_version <> os_version then
       Db.VM_guest_metrics.set_os_version ~__context ~self:gm ~value:os_version ;
+    if guest_metrics_cached.netbios_name <> netbios_name then
+      Db.VM_guest_metrics.set_netbios_name ~__context ~self:gm
+        ~value:netbios_name ;
     if guest_metrics_cached.networks <> networks then
       Db.VM_guest_metrics.set_networks ~__context ~self:gm ~value:networks ;
     if guest_metrics_cached.other <> other then (
@@ -448,7 +469,7 @@ let all (lookup : string -> string option) (list : string -> string list)
     (* if(guest_metrics_cached.memory <> memory) then
        Db.VM_guest_metrics.set_memory ~__context ~self:gm ~value:memory; *)
     Db.VM_guest_metrics.set_last_updated ~__context ~self:gm
-      ~value:(Date.of_float last_updated) ;
+      ~value:(Date.of_unix_time last_updated) ;
     if guest_metrics_cached.device_id <> device_id then
       if List.mem_assoc Xapi_globs.device_id_key_name device_id then (
         let value = List.assoc Xapi_globs.device_id_key_name device_id in
diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml
index 578788f8c9c..ddfbc357fb2 100644
--- a/ocaml/xapi/xapi_ha.ml
+++ b/ocaml/xapi/xapi_ha.ml
@@ -1463,7 +1463,7 @@ let rec propose_new_master_internal ~__context ~address ~manual =
             (Printf.sprintf
                "Already agreed to commit host address '%s' at %s ('%f' secs ago)"
                x
-               (Date.to_string (Date.of_float !proposed_master_time))
+               (Date.to_rfc3339 (Date.of_unix_time !proposed_master_time))
                diff
             )
       | None ->
diff --git a/ocaml/xapi/xapi_hooks.ml b/ocaml/xapi/xapi_hooks.ml
index abb29dd4f52..ecc1a258063 100644
--- a/ocaml/xapi/xapi_hooks.ml
+++ b/ocaml/xapi/xapi_hooks.ml
@@ -20,6 +20,8 @@ let scriptname__host_pre_declare_dead = "host-pre-declare-dead"
 
 let scriptname__host_post_declare_dead = "host-post-declare-dead"
 
+let scriptname__xapi_pre_shutdown = "xapi-pre-shutdown"
+
 (* Host Script hook reason codes *)
 let reason__fenced = "fenced"
 
@@ -71,11 +73,15 @@ let execute_hook ~__context ~script_name ~args ~reason =
       try
         debug "Executing hook '%s/%s' with args [ %s ]" script_name script
           (String.concat "; " args) ;
-        ignore
-          (Forkhelpers.execute_command_get_output
-             (Filename.concat script_dir script)
-             args
-          )
+        let os, es =
+          Forkhelpers.execute_command_get_output
+            (Filename.concat script_dir script)
+            args
+        in
+        debug
+          "%s: output of hook '%s/%s' with args [ %s ]: stdout: %s, stderr: \
+           %s"
+          __FUNCTION__ script_name script (String.concat "; " args) os es
       with
       | Forkhelpers.Spawn_internal_error (_, stdout, Unix.WEXITED i)
       (* i<>0 since that case does not generate exn *)
@@ -123,6 +129,12 @@ let host_pre_declare_dead ~__context ~host ~reason =
         ()
     )
 
+let xapi_pre_shutdown ~__context ~host ~reason =
+  info "%s: Running xapi-pre-shutdown hooks for %s" __FUNCTION__
+    (Ref.string_of host) ;
+  execute_host_hook ~__context ~script_name:scriptname__xapi_pre_shutdown
+    ~reason ~host
+
 (* Called when host died -- !!
hook code in here to abort outstanding forwarded ops *) let internal_host_dead_hook __context host = info "Running host dead hook for %s" (Ref.string_of host) ; diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index 05955958813..aa2f07e2fba 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -43,7 +43,7 @@ let take n xs = in loop n [] xs -let get_servertime ~__context ~host:_ = Date.of_float (Unix.gettimeofday ()) +let get_servertime ~__context ~host:_ = Date.now () let get_server_localtime ~__context ~host:_ = Date.localtime () @@ -778,6 +778,9 @@ let restart_agent ~__context ~host:_ = let shutdown_agent ~__context = debug "Host.restart_agent: Host agent will shutdown in 1s!!!!" ; + let localhost = Helpers.get_localhost ~__context in + Xapi_hooks.xapi_pre_shutdown ~__context ~host:localhost + ~reason:Xapi_hooks.reason__clean_shutdown ; Xapi_fuse.light_fuse_and_dont_restart ~fuse_length:1. () let disable ~__context ~host = @@ -1006,7 +1009,7 @@ let create ~__context ~uuid ~name_label ~name_description:_ ~hostname ~address let make_new_metrics_object ref = Db.Host_metrics.create ~__context ~ref ~uuid:(Uuidx.to_string (Uuidx.make ())) - ~live:false ~memory_total:0L ~memory_free:0L ~last_updated:Date.never + ~live:false ~memory_total:0L ~memory_free:0L ~last_updated:Date.epoch ~other_config:[] in let name_description = "Default install" and host = Ref.make () in @@ -1055,8 +1058,7 @@ let create ~__context ~uuid ~name_label ~name_description:_ ~hostname ~address ~latest_synced_updates_applied:`unknown ~pending_guidances_recommended:[] ~pending_guidances_full:[] ~last_update_hash:"" ; (* If the host we're creating is us, make sure its set to live *) - Db.Host_metrics.set_last_updated ~__context ~self:metrics - ~value:(Date.of_float (Unix.gettimeofday ())) ; + Db.Host_metrics.set_last_updated ~__context ~self:metrics ~value:(Date.now ()) ; Db.Host_metrics.set_live ~__context ~self:metrics ~value:host_is_us ; host @@ -1928,6 +1930,11 @@ let disable_external_auth_common ?(during_pool_eject = false) ~__context ~host (* succeeds because there's no need to initialize anymore *) + (* If any cache is present, clear it in order to ensure cached + logins don't persist after disabling external + authentication. *) + Xapi_session.clear_external_auth_cache () ; + (* 3. 
CP-703: we always revalidate all sessions after the external authentication has been disabled *) (* so that all sessions that were externally authenticated will be destroyed *) debug diff --git a/ocaml/xapi/xapi_host.mli b/ocaml/xapi/xapi_host.mli index 39f20223c13..8813f037b19 100644 --- a/ocaml/xapi/xapi_host.mli +++ b/ocaml/xapi/xapi_host.mli @@ -77,7 +77,7 @@ val retrieve_wlb_evacuate_recommendations : val restart_agent : __context:'a -> host:'b -> unit -val shutdown_agent : __context:'a -> unit +val shutdown_agent : __context:Context.t -> unit val disable : __context:Context.t -> host:[`host] Ref.t -> unit @@ -272,10 +272,9 @@ val sync_data : __context:Context.t -> host:API.ref_host -> unit val backup_rrds : __context:Context.t -> host:'b -> delay:float -> unit -val get_servertime : __context:'a -> host:'b -> Xapi_stdext_date.Date.iso8601 +val get_servertime : __context:'a -> host:'b -> Xapi_stdext_date.Date.t -val get_server_localtime : - __context:'a -> host:'b -> Xapi_stdext_date.Date.iso8601 +val get_server_localtime : __context:'a -> host:'b -> Xapi_stdext_date.Date.t val enable_binary_storage : __context:Context.t -> host:[`host] Ref.t -> unit diff --git a/ocaml/xapi/xapi_host_crashdump.ml b/ocaml/xapi/xapi_host_crashdump.ml index f16255e2c97..e8095ba6259 100644 --- a/ocaml/xapi/xapi_host_crashdump.ml +++ b/ocaml/xapi/xapi_host_crashdump.ml @@ -134,7 +134,7 @@ let resynchronise ~__context ~host = with _ -> (Unix.stat (Filename.concat crash_dir filename)).Unix.st_ctime in - let timestamp = Date.of_float timestamp in + let timestamp = Date.of_unix_time timestamp in let r = Ref.make () and uuid = Uuidx.to_string (Uuidx.make ()) in Db.Host_crashdump.create ~__context ~ref:r ~uuid ~other_config:[] ~host ~timestamp ~size ~filename diff --git a/ocaml/xapi/xapi_local_session.ml b/ocaml/xapi/xapi_local_session.ml index 148c776ca2f..7a5cf5f5070 100644 --- a/ocaml/xapi/xapi_local_session.ml +++ b/ocaml/xapi/xapi_local_session.ml @@ -14,11 +14,7 @@ (** Code to handle local sessions, used so that slaves can communicate even when the master is down. 
*) -type t = { - r: API.ref_session - ; pool: bool - ; last_active: Xapi_stdext_date.Date.iso8601 -} +type t = {r: API.ref_session; pool: bool; last_active: Xapi_stdext_date.Date.t} let with_lock = Xapi_stdext_threads.Threadext.Mutex.execute @@ -31,13 +27,7 @@ let get_all ~__context = let create ~__context ~pool = let r = Ref.make () in - let session = - { - r - ; pool - ; last_active= Xapi_stdext_date.Date.of_float (Unix.gettimeofday ()) - } - in + let session = {r; pool; last_active= Xapi_stdext_date.Date.now ()} in with_lock m (fun () -> Hashtbl.replace table r session) ; r diff --git a/ocaml/xapi/xapi_local_session.mli b/ocaml/xapi/xapi_local_session.mli index dbba9a57a8b..c7859879990 100644 --- a/ocaml/xapi/xapi_local_session.mli +++ b/ocaml/xapi/xapi_local_session.mli @@ -13,11 +13,7 @@ *) (** Represents local sessions, for use in emergency mode *) -type t = { - r: API.ref_session - ; pool: bool - ; last_active: Xapi_stdext_date.Date.iso8601 -} +type t = {r: API.ref_session; pool: bool; last_active: Xapi_stdext_date.Date.t} val get_all : __context:Context.t -> API.ref_session list diff --git a/ocaml/xapi/xapi_message.ml b/ocaml/xapi/xapi_message.ml index 50621a9aa9c..8bc43cc48e8 100644 --- a/ocaml/xapi/xapi_message.ml +++ b/ocaml/xapi/xapi_message.ml @@ -71,9 +71,9 @@ let to_xml output _ref gen message = tag "ref" [data (Ref.string_of _ref)] ; tag "name" [data message.API.message_name] ; tag "priority" [data (Int64.to_string message.API.message_priority)] - ; tag "cls" [data (Record_util.class_to_string message.API.message_cls)] + ; tag "cls" [data (Record_util.cls_to_string message.API.message_cls)] ; tag "obj_uuid" [data message.API.message_obj_uuid] - ; tag "timestamp" [data (Date.to_string message.API.message_timestamp)] + ; tag "timestamp" [data (Date.to_rfc3339 message.API.message_timestamp)] ; tag "uuid" [data message.API.message_uuid] ; tag "body" [data message.API.message_body] ] @@ -96,7 +96,7 @@ let of_xml input = ; API.message_priority= 0L ; API.message_cls= `VM ; API.message_obj_uuid= "" - ; API.message_timestamp= Date.never + ; API.message_timestamp= Date.epoch ; API.message_body= "" ; API.message_uuid= "" } @@ -119,11 +119,12 @@ let of_xml input = message := {!message with API.message_priority= Int64.of_string dat} | "cls" -> message := - {!message with API.message_cls= Record_util.string_to_class dat} + {!message with API.message_cls= Record_util.cls_of_string dat} | "obj_uuid" -> message := {!message with API.message_obj_uuid= dat} | "timestamp" -> - message := {!message with API.message_timestamp= Date.of_string dat} + message := + {!message with API.message_timestamp= Date.of_iso8601 dat} | "uuid" -> message := {!message with API.message_uuid= dat} | "body" -> @@ -188,7 +189,7 @@ let import_xml xml_in = (********** Symlink functions *************) let class_symlink cls obj_uuid = - let strcls = Record_util.class_to_string cls in + let strcls = Record_util.cls_to_string cls in Printf.sprintf "%s/%s/%s" message_dir strcls obj_uuid let uuid_symlink () = Printf.sprintf "%s/uuids" message_dir @@ -342,7 +343,7 @@ let write ~__context ~_ref ~message = ) ) ; Unixext.mkdir_rec message_dir 0o700 ; - let timestamp = ref (Date.to_float message.API.message_timestamp) in + let timestamp = ref (Date.to_unix_time message.API.message_timestamp) in if message_exists () then Some (message_gen ()) else @@ -411,14 +412,14 @@ let write ~__context ~_ref ~message = if write failed, or message ref otherwise. 
*) let create ~__context ~name ~priority ~cls ~obj_uuid ~body = debug "Message.create %s %Ld %s %s" name priority - (Record_util.class_to_string cls) + (Record_util.cls_to_string cls) obj_uuid ; if not (Encodings.UTF8_XML.is_valid body) then raise (Api_errors.Server_error (Api_errors.invalid_value, ["UTF8 expected"])) ; if not (check_uuid ~__context ~cls ~uuid:obj_uuid) then raise (Api_errors.Server_error - (Api_errors.uuid_invalid, [Record_util.class_to_string cls; obj_uuid]) + (Api_errors.uuid_invalid, [Record_util.cls_to_string cls; obj_uuid]) ) ; let _ref = Ref.make () in let uuid = Uuidx.to_string (Uuidx.make ()) in @@ -442,7 +443,7 @@ let create ~__context ~name ~priority ~cls ~obj_uuid ~body = ; API.message_priority= priority ; API.message_cls= cls ; API.message_obj_uuid= obj_uuid - ; API.message_timestamp= Date.of_float timestamp + ; API.message_timestamp= Date.of_unix_time timestamp ; API.message_body= body } in @@ -596,8 +597,8 @@ let get_real_inner dir filter name_filter = r else compare - (Date.to_float m2.API.message_timestamp) - (Date.to_float m1.API.message_timestamp) + (Date.to_unix_time m2.API.message_timestamp) + (Date.to_unix_time m1.API.message_timestamp) ) messages with _ -> [] @@ -631,16 +632,16 @@ let get ~__context ~cls ~obj_uuid ~since = (* Read in all the messages for a particular object *) let class_symlink = class_symlink cls obj_uuid in if not (check_uuid ~__context ~cls ~uuid:obj_uuid) then - raise (Api_errors.Server_error (Api_errors.uuid_invalid, [])) ; + raise Api_errors.(Server_error (uuid_invalid, [])) ; let msg = get_real_inner class_symlink - (fun msg -> Date.to_float msg.API.message_timestamp > Date.to_float since) + (fun msg -> Date.is_later msg.API.message_timestamp ~than:since) (fun _ -> true) in List.map (fun (_, b, c) -> (b, c)) msg let get_since ~__context ~since = - get_real message_dir (fun _ -> true) (Date.to_float since) + get_real message_dir (fun _ -> true) (Date.to_unix_time since) let get_since_for_events ~__context since = let cached_result = @@ -747,7 +748,7 @@ let repopulate_cache () = let last_256 = Listext.List.take 256 messages in in_memory_cache := last_256 ; let get_ts (ts, _, m) = - Printf.sprintf "%Ld (%s)" ts (Date.to_string m.API.message_timestamp) + Printf.sprintf "%Ld (%s)" ts (Date.to_rfc3339 m.API.message_timestamp) in debug "Constructing in-memory-cache: most length=%d" (List.length last_256) ; ( try @@ -800,7 +801,7 @@ let handler (req : Http.Request.t) fd _ = else (* Get and check query parameters *) let uuid = List.assoc "uuid" query and cls = List.assoc "cls" query in let cls = - try Record_util.string_to_class cls + try Record_util.cls_of_string cls with _ -> failwith ("Xapi_message.handler: Bad class " ^ cls) in if not (check_uuid ~__context ~cls ~uuid) then @@ -829,7 +830,7 @@ let send_messages ~__context ~cls ~obj_uuid ~session_id ~remote_address let query = [ ("session_id", Ref.string_of session_id) - ; ("cls", Record_util.class_to_string cls) + ; ("cls", Record_util.cls_to_string cls) ; ("uuid", obj_uuid) ] in diff --git a/ocaml/xapi/xapi_network.ml b/ocaml/xapi/xapi_network.ml index 3aefbad3be8..37d527a2a34 100644 --- a/ocaml/xapi/xapi_network.ml +++ b/ocaml/xapi/xapi_network.ml @@ -439,6 +439,7 @@ let assert_can_add_purpose ~__context ~network:_ ~current:_ newval = assert_no_net_has_bad_porpoise [`nbd] let add_purpose ~__context ~self ~value = + assert_network_is_managed ~__context ~self ; let current = Db.Network.get_purpose ~__context ~self in if not (List.mem value current) then ( assert_can_add_purpose 
~__context ~network:self ~current value ; diff --git a/ocaml/xapi/xapi_network_sriov_helpers.ml b/ocaml/xapi/xapi_network_sriov_helpers.ml index 952a7c35270..6600f6a2f44 100644 --- a/ocaml/xapi/xapi_network_sriov_helpers.ml +++ b/ocaml/xapi/xapi_network_sriov_helpers.ml @@ -56,7 +56,7 @@ let sriov_bring_up ~__context ~self = in info "Enable network sriov on PIF %s successful, mode: %s need_reboot: %b" (Ref.string_of physical_pif) - (Record_util.network_sriov_configuration_mode_to_string mode) + (Record_util.sriov_configuration_mode_to_string mode) require_reboot ; Db.Network_sriov.set_configuration_mode ~__context ~self:sriov ~value:mode ; Db.Network_sriov.set_requires_reboot ~__context ~self:sriov diff --git a/ocaml/xapi/xapi_observer_components.ml b/ocaml/xapi/xapi_observer_components.ml index 797c236b248..d3e0587b143 100644 --- a/ocaml/xapi/xapi_observer_components.ml +++ b/ocaml/xapi/xapi_observer_components.ml @@ -48,7 +48,9 @@ let all = List.map of_string Constants.observer_components_all This does mean that observer will always be enabled for clusterd. *) let startup_components () = List.filter - (function Xapi_clusterd -> !Xapi_clustering.Daemon.enabled | _ -> true) + (function + | Xapi_clusterd -> Xapi_clustering.Daemon.is_enabled () | _ -> true + ) all let assert_valid_components components = diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index d6d7a16a692..56dff779240 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -347,7 +347,8 @@ let assert_fcoe_not_in_use ~__context ~self = () ) -let find_or_create_network (bridge : string) (device : string) ~__context = +let find_or_create_network (bridge : string) (device : string) ~managed + ~__context = let nets = Db.Network.get_refs_where ~__context ~expr:(Eq (Field "bridge", Literal bridge)) @@ -362,7 +363,7 @@ let find_or_create_network (bridge : string) (device : string) ~__context = Db.Network.create ~__context ~ref:net_ref ~uuid:net_uuid ~current_operations:[] ~allowed_operations:[] ~name_label:(Helpers.choose_network_name_for_pif device) - ~name_description:"" ~mTU:1500L ~purpose:[] ~bridge ~managed:true + ~name_description:"" ~mTU:1500L ~purpose:[] ~bridge ~managed ~other_config:[] ~blobs:[] ~tags:[] ~default_locking_mode:`unlocked ~assigned_ips:[] in @@ -411,7 +412,7 @@ let make_pif_metrics ~__context = Db.PIF_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid ~carrier:false ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" ~speed:0L ~duplex:false ~pci_bus_path:"" ~io_read_kbs:0. ~io_write_kbs:0. - ~last_updated:(Date.of_float 0.) 
~other_config:[] + ~last_updated:Date.epoch ~other_config:[] in metrics @@ -457,13 +458,13 @@ let db_forget ~__context ~self = Db.PIF.destroy ~__context ~self let introduce_internal ?network ?(physical = true) ~t:_ ~__context ~host ~mAC ~mTU ~device ~vLAN ~vLAN_master_of ?metrics ~managed ?(disallow_unplug = false) () = - let bridge = bridge_naming_convention device in + let bridge = if managed then bridge_naming_convention device else "" in (* If we are not told which network to use, * apply the default convention *) let net_ref = match network with | None -> - find_or_create_network bridge device ~__context + find_or_create_network bridge device ~managed ~__context | Some x -> x in @@ -667,6 +668,8 @@ let scan ~__context ~host = ([], []) ) in + debug "non-managed devices=%s" (String.concat "," non_managed_devices) ; + debug "disallow-unplug devices=%s" (String.concat "," disallow_unplug_devices) ; Xapi_stdext_threads.Threadext.Mutex.execute scan_m (fun () -> let t = make_tables ~__context ~host in let devices_not_yet_represented_by_pifs = @@ -681,6 +684,8 @@ let scan ~__context ~host = let mTU = Int64.of_int (Net.Interface.get_mtu dbg device) in let managed = not (List.mem device non_managed_devices) in let disallow_unplug = List.mem device disallow_unplug_devices in + debug "About to introduce %s, managed=%b, disallow-unplug=%b" device + managed disallow_unplug ; let (_ : API.ref_PIF) = introduce_internal ~t ~__context ~host ~mAC ~mTU ~vLAN:(-1L) ~vLAN_master_of:Ref.null ~device ~managed ~disallow_unplug () diff --git a/ocaml/xapi/xapi_pif.mli b/ocaml/xapi/xapi_pif.mli index 93bacd86be5..07c3a85877c 100644 --- a/ocaml/xapi/xapi_pif.mli +++ b/ocaml/xapi/xapi_pif.mli @@ -175,12 +175,6 @@ val assert_usable_for_management : -> unit (** Ensure the PIF can be used for management. *) -val find_or_create_network : - string -> string -> __context:Context.t -> [`network] Ref.t -(** If a network for the given bridge already exists, then return a reference to this network, - * otherwise create a new network and return its reference. -*) - (** Convenient lookup tables for scanning etc *) type tables diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 4fdefa0f8fb..49ea7194dc9 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -3127,10 +3127,10 @@ let get_license_state ~__context ~self:_ = | None -> "never" | Some date -> - if date = Date.of_float License_check.never then + if date = Date.of_unix_time License_check.never then "never" else - Date.to_string date + Date.to_rfc3339 date in [("edition", pool_edition); ("expiry", pool_expiry)] @@ -3285,7 +3285,7 @@ let alert_failed_login_attempts () = let now = Date.localtime () in let login_failures_between = Printf.sprintf "login failures between '%s' and last check" - (Date.to_string now) + (Date.to_rfc3339 now) in match Xapi_session.get_failed_login_stats () with | None -> @@ -3678,7 +3678,7 @@ let set_telemetry_next_collection ~__context ~self ~value = let err_msg = "Can't parse date and time for telemetry collection." 
in raise Api_errors.(Server_error (internal_error, [err_msg])) in - let ts = Date.to_string value in + let ts = Date.to_rfc3339 value in match Ptime.is_later dt_of_value ~than:dt_of_max_sched with | true -> raise Api_errors.(Server_error (telemetry_next_collection_too_late, [ts])) diff --git a/ocaml/xapi/xapi_pool.mli b/ocaml/xapi/xapi_pool.mli index 5fc33c66cad..9e74ea3f373 100644 --- a/ocaml/xapi/xapi_pool.mli +++ b/ocaml/xapi/xapi_pool.mli @@ -397,7 +397,7 @@ val set_https_only : val set_telemetry_next_collection : __context:Context.t -> self:API.ref_pool - -> value:Xapi_stdext_date.Date.iso8601 + -> value:Xapi_stdext_date.Date.t -> unit val reset_telemetry_uuid : __context:Context.t -> self:API.ref_pool -> unit diff --git a/ocaml/xapi/xapi_pool_helpers.ml b/ocaml/xapi/xapi_pool_helpers.ml index 16309c7bd51..ec281ade966 100644 --- a/ocaml/xapi/xapi_pool_helpers.ml +++ b/ocaml/xapi/xapi_pool_helpers.ml @@ -138,7 +138,7 @@ let throw_error table op = Printf.sprintf "xapi_pool_helpers.assert_operation_valid unknown operation: \ %s" - (pool_operation_to_string op) + (pool_allowed_operations_to_string op) ] ) ) @@ -202,7 +202,7 @@ let assert_no_pool_ops ~__context = let err = ops |> List.map snd - |> List.map Record_util.pool_operation_to_string + |> List.map Record_util.pool_allowed_operations_to_string |> String.concat "; " |> Printf.sprintf "pool operations in progress: [ %s ]" in diff --git a/ocaml/xapi/xapi_pool_license.ml b/ocaml/xapi/xapi_pool_license.ml index 5e69d64dee1..a37805127db 100644 --- a/ocaml/xapi/xapi_pool_license.ml +++ b/ocaml/xapi/xapi_pool_license.ml @@ -17,8 +17,8 @@ module D = Debug.Make (struct let name = "xapi_pool_license" end) open D (* Compare two date options, where None is always greater than (Some _) *) -let compare_dates (a : Xapi_stdext_date.Date.iso8601 option) - (b : Xapi_stdext_date.Date.iso8601 option) = +let compare_dates (a : Xapi_stdext_date.Date.t option) + (b : Xapi_stdext_date.Date.t option) = match (a, b) with | None, None -> 0 diff --git a/ocaml/xapi/xapi_pool_patch.ml b/ocaml/xapi/xapi_pool_patch.ml index 5988a1abc7c..72033070bc5 100644 --- a/ocaml/xapi/xapi_pool_patch.ml +++ b/ocaml/xapi/xapi_pool_patch.ml @@ -140,7 +140,7 @@ let get_patch_applied_to ~__context ~patch ~host = let write_patch_applied_db ~__context ?date ?(applied = true) ~self ~host () = let date = - Xapi_stdext_date.Date.of_float + Xapi_stdext_date.Date.of_unix_time (match date with Some d -> d | None -> Unix.gettimeofday ()) in match get_patch_applied_to ~__context ~patch:self ~host with diff --git a/ocaml/xapi/xapi_pool_transition.ml b/ocaml/xapi/xapi_pool_transition.ml index 6ff8f892bd9..8f6a315f591 100644 --- a/ocaml/xapi/xapi_pool_transition.ml +++ b/ocaml/xapi/xapi_pool_transition.ml @@ -215,6 +215,8 @@ let become_another_masters_slave master_address = if Pool_role.get_role () = new_role then debug "We are already a slave of %s; nothing to do" master_address else ( + if Pool_role.is_master () then (* I am the old master *) + Xapi_clustering.Watcher.signal_exit () ; debug "Setting pool.conf to point to %s" master_address ; set_role new_role ; run_external_scripts false ; diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 802013ed326..72a0ff7c705 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -41,7 +41,7 @@ module AuthFail : sig val on_fail : __context:Context.t - -> now:Date.iso8601 + -> now:Date.t -> uname:string option -> originator:string option -> record:[< `log_only | `log_and_alert] @@ -85,7 +85,7 @@ end = struct 
type client_failed_attempts = { client: client ; num_failed_attempts: int - ; last_failed_attempt: Date.iso8601 + ; last_failed_attempt: Date.t } let up_to_3 xs x = @@ -103,7 +103,7 @@ end = struct |} (string_of_client x.client) x.num_failed_attempts - (Date.to_string x.last_failed_attempt) + (Date.to_rfc3339 x.last_failed_attempt) type stats = { total_num_failed_attempts: int @@ -143,7 +143,7 @@ end = struct val get : unit -> stats option (* returns the number of failures from this client since last call to [ get ] *) - val record_client : client -> now:Date.iso8601 -> int + val record_client : client -> now:Date.t -> int (* returns number of failures from unknown clients since last call to [ get ] *) val record_unknown : unit -> int @@ -159,7 +159,7 @@ end = struct ctr ) - type value = {num_failed_attempts: int; last_failed_attempt: Date.iso8601} + type value = {num_failed_attempts: int; last_failed_attempt: Date.t} let table = Hashtbl.create 10 @@ -247,7 +247,7 @@ let _record_login_failure ~__context ~now ~uname ~originator ~record f = let record_login_failure ~__context ~uname ~originator ~record f = Context.with_tracing ?originator ~__context __FUNCTION__ @@ fun __context -> - let now = Unix.time () |> Date.of_float in + let now = Date.now () in _record_login_failure ~__context ~now ~uname ~originator ~record f let get_failed_login_stats = AuthFail.get_stats_string @@ -448,11 +448,12 @@ let revalidate_external_session ~__context ~session = (* 2. has the external session expired/does it need revalidation? *) let session_last_validation_time = - Date.to_float (Db.Session.get_validation_time ~__context ~self:session) + Date.to_unix_time + (Db.Session.get_validation_time ~__context ~self:session) in - let now = Unix.time () in + let now = Date.now () in let session_needs_revalidation = - now + Date.to_unix_time now > session_last_validation_time +. session_lifespan +. 
random_lifespan in if session_needs_revalidation then ( @@ -528,7 +529,7 @@ let revalidate_external_session ~__context ~session = (* session passed revalidation, let's update its last revalidation time *) Db.Session.set_validation_time ~__context ~self:session - ~value:(Date.of_float now) ; + ~value:now ; debug "updated validation time for session %s, sid %s " (trackid session) authenticated_user_sid ; (* let's also update the session's subject ref *) @@ -634,12 +635,11 @@ let login_no_password_common ~__context ~uname ~originator ~host ~pool (trackid session_id) pool (match uname with None -> "" | Some u -> u) originator is_local_superuser auth_user_sid (trackid parent) ; + let now = Date.now () in Db.Session.create ~__context ~ref:session_id ~uuid ~this_user:user - ~this_host:host ~pool - ~last_active:(Date.of_float (Unix.time ())) - ~other_config:[] ~subject ~is_local_superuser ~auth_user_sid - ~validation_time:(Date.of_float (Unix.time ())) - ~auth_user_name ~rbac_permissions ~parent ~originator ~client_certificate ; + ~this_host:host ~pool ~last_active:now ~other_config:[] ~subject + ~is_local_superuser ~auth_user_sid ~validation_time:now ~auth_user_name + ~rbac_permissions ~parent ~originator ~client_certificate ; if not pool then Atomic.incr total_sessions ; Ref.string_of session_id @@ -838,8 +838,14 @@ module Caching = struct | Some prev_result -> prev_result ) + + let clear_cache () = + let@ () = with_lock lock in + cache := None end +let clear_external_auth_cache = Caching.clear_cache + (* CP-714: Modify session.login_with_password to first try local super-user login; and then call into external auth plugin if this is enabled 1. If the pool master's Host.external_auth_type field is not none, then the diff --git a/ocaml/xapi/xapi_session.mli b/ocaml/xapi/xapi_session.mli index 2dc98429f3e..c228fc3bfc5 100644 --- a/ocaml/xapi/xapi_session.mli +++ b/ocaml/xapi/xapi_session.mli @@ -87,7 +87,7 @@ val create_from_db_file : (* for unit testing *) val _record_login_failure : __context:Context.t - -> now:Xapi_stdext_date.Date.iso8601 + -> now:Xapi_stdext_date.Date.t -> uname:string option -> originator:string option -> record:[< `log_only | `log_and_alert] @@ -110,3 +110,5 @@ val get_total_sessions : unit -> Int64.t val set_local_auth_max_threads : int64 -> unit val set_ext_auth_max_threads : int64 -> unit + +val clear_external_auth_cache : unit -> unit diff --git a/ocaml/xapi/xapi_sr.ml b/ocaml/xapi/xapi_sr.ml index 7b5186d5195..d572660e72d 100644 --- a/ocaml/xapi/xapi_sr.ml +++ b/ocaml/xapi/xapi_sr.ml @@ -682,7 +682,7 @@ let update_vdis ~__context ~sr db_vdis vdi_infos = ~current_operations:[] ~allowed_operations:[] ~is_a_snapshot:vdi.is_a_snapshot ~snapshot_of:(find_vdi db_vdi_map vdi.snapshot_of) - ~snapshot_time:(Date.of_string vdi.snapshot_time) + ~snapshot_time:(Date.of_iso8601 vdi.snapshot_time) ~sR:sr ~virtual_size:vdi.virtual_size ~physical_utilisation:vdi.physical_utilisation ~_type:(try Storage_utils.vdi_type_of_string vdi.ty with _ -> `user) @@ -735,10 +735,10 @@ let update_vdis ~__context ~sr db_vdis vdi_infos = debug "%s is_a_snapshot <- %b" (Ref.string_of r) vi.is_a_snapshot ; Db.VDI.set_is_a_snapshot ~__context ~self:r ~value:vi.is_a_snapshot ) ; - if v.API.vDI_snapshot_time <> Date.of_string vi.snapshot_time then ( + if v.API.vDI_snapshot_time <> Date.of_iso8601 vi.snapshot_time then ( debug "%s snapshot_time <- %s" (Ref.string_of r) vi.snapshot_time ; Db.VDI.set_snapshot_time ~__context ~self:r - ~value:(Date.of_string vi.snapshot_time) + ~value:(Date.of_iso8601 
vi.snapshot_time) ) ; let snapshot_of = find_vdi db_vdi_map vi.snapshot_of in if v.API.vDI_snapshot_of <> snapshot_of then ( @@ -787,8 +787,8 @@ let scan ~__context ~sr = SRScanThrottle.execute (fun () -> transform_storage_exn (fun () -> let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - let vs = - C.SR.scan (Ref.string_of task) + let vs, sr_info = + C.SR.scan2 (Ref.string_of task) (Storage_interface.Sr.of_string sr_uuid) in let db_vdis = @@ -796,10 +796,6 @@ let scan ~__context ~sr = ~expr:(Eq (Field "SR", Literal sr')) in update_vdis ~__context ~sr db_vdis vs ; - let sr_info = - C.SR.stat (Ref.string_of task) - (Storage_interface.Sr.of_string sr_uuid) - in let virtual_allocation = List.fold_left Int64.add 0L (List.map (fun v -> v.Storage_interface.virtual_size) vs) diff --git a/ocaml/xapi/xapi_vbd.ml b/ocaml/xapi/xapi_vbd.ml index 5e1b31c5bee..0bd805e5a26 100644 --- a/ocaml/xapi/xapi_vbd.ml +++ b/ocaml/xapi/xapi_vbd.ml @@ -260,7 +260,7 @@ let create ~__context ~vM ~vDI ~device ~userdevice ~bootable ~mode ~_type let metrics = Ref.make () and metrics_uuid = Uuidx.to_string (Uuidx.make ()) in Db.VBD_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:Date.epoch ~other_config:[] ; (* Enable the SM driver to specify a VBD backend kind for the VDI *) let other_config = @@ -310,10 +310,16 @@ let assert_not_suspended ~__context ~vm = if Db.VM.get_power_state ~__context ~self:vm = `Suspended then let expected = String.concat ", " - (List.map Record_util.power_to_string [`Halted; `Running]) + (List.map Record_util.vm_power_state_to_lowercase_string + [`Halted; `Running] + ) in let error_params = - [Ref.string_of vm; expected; Record_util.power_to_string `Suspended] + [ + Ref.string_of vm + ; expected + ; Record_util.vm_power_state_to_lowercase_string `Suspended + ] in raise (Api_errors.Server_error (Api_errors.vm_bad_power_state, error_params)) diff --git a/ocaml/xapi/xapi_vbd_helpers.ml b/ocaml/xapi/xapi_vbd_helpers.ml index 94471108e41..c5a370df137 100644 --- a/ocaml/xapi/xapi_vbd_helpers.ml +++ b/ocaml/xapi/xapi_vbd_helpers.ml @@ -76,7 +76,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = ( if current_ops <> [] then let concurrent_op = List.hd current_ops in set_errors Api_errors.other_operation_in_progress - ["VBD"; _ref; vbd_operation_to_string concurrent_op] + ["VBD"; _ref; vbd_operations_to_string concurrent_op] (Listext.List.set_difference all_ops safe_to_parallelise) ) ; (* If not all operations are parallisable then preclude pause *) @@ -88,7 +88,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = parallelisable operations too *) if not all_are_parallelisable then set_errors Api_errors.other_operation_in_progress - ["VBD"; _ref; vbd_operation_to_string (List.hd current_ops)] + ["VBD"; _ref; vbd_operations_to_string (List.hd current_ops)] [`pause] ; (* If something other than `pause `unpause *and* `attach (for VM.reboot, see CA-24282) then disallow unpause *) if @@ -96,7 +96,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = <> [] then set_errors Api_errors.other_operation_in_progress - ["VBD"; _ref; vbd_operation_to_string (List.hd current_ops)] + ["VBD"; _ref; vbd_operations_to_string (List.hd current_ops)] [`unpause] ; (* Drives marked as not unpluggable cannot be unplugged *) if not record.Db_actions.vBD_unpluggable then @@ -122,8 +122,8 @@ let 
valid_operations ~expensive_sharing_checks ~__context record _ref' : table = set_errors Api_errors.device_already_detached [_ref] [`unplug; `unplug_force] | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in + let expected = Record_util.vm_power_state_to_lowercase_string `Running in (* If not Running, always block these operations: *) let bad_ops = [`plug; `unplug; `unplug_force] in (* However allow VBD pause and unpause if the VM is paused: *) @@ -199,10 +199,16 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = ( if record.Db_actions.vBD_type = `CD && power_state = `Suspended then let expected = String.concat ", " - (List.map Record_util.power_to_string [`Halted; `Running]) + (List.map Record_util.vm_power_state_to_lowercase_string + [`Halted; `Running] + ) in let error_params = - [Ref.string_of vm; expected; Record_util.power_to_string `Suspended] + [ + Ref.string_of vm + ; expected + ; Record_util.vm_power_state_to_lowercase_string `Suspended + ] in set_errors Api_errors.vm_bad_power_state error_params [`insert; `eject] (* `attach required for resume *) @@ -229,7 +235,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = snd (List.hd vdi_record.Db_actions.vDI_current_operations) in set_errors Api_errors.other_operation_in_progress - ["VDI"; Ref.string_of vdi; vdi_operation_to_string concurrent_op] + ["VDI"; Ref.string_of vdi; vdi_operations_to_string concurrent_op] [`attach; `plug; `insert] ) ; if @@ -307,7 +313,7 @@ let throw_error (table : table) op = , [ Printf.sprintf "xapi_vbd_helpers.assert_operation_valid unknown operation: %s" - (vbd_operation_to_string op) + (vbd_operations_to_string op) ] ) ) @@ -427,8 +433,7 @@ let copy ~__context ?vdi ~vm vbd = let metrics_uuid = Uuidx.to_string (Uuidx.make ()) in let vdi = Option.value ~default:all.API.vBD_VDI vdi in Db.VBD_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) - ~other_config:[] ; + ~io_read_kbs:0. ~io_write_kbs:0. 
~last_updated:Date.epoch ~other_config:[] ; Db.VBD.create ~__context ~ref:new_vbd ~uuid:vbd_uuid ~allowed_operations:[] ~current_operations:[] ~storage_lock:false ~vM:vm ~vDI:vdi ~empty:(all.API.vBD_empty || vdi = Ref.null) diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index f2f1ed12688..ab8c543a36a 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -247,7 +247,7 @@ let check_operation_error ~__context ?sr_records:_ ?(pbd_records = []) if blocked_by_attach then Some ( Api_errors.vdi_in_use - , [_ref; Record_util.vdi_operation_to_string op] + , [_ref; Record_util.vdi_operations_to_string op] ) else if (* data_destroy first waits for all the VBDs to disappear in its @@ -961,7 +961,7 @@ let wait_for_vbds_to_be_unplugged_and_destroyed ~__context ~self ~timeout = ( Api_errors.vdi_in_use , [ Ref.string_of self - ; Record_util.vdi_operation_to_string `data_destroy + ; Record_util.vdi_operations_to_string `data_destroy ] ) ) diff --git a/ocaml/xapi/xapi_vdi.mli b/ocaml/xapi/xapi_vdi.mli index 8e52daf8305..ff3e5a9e0ec 100644 --- a/ocaml/xapi/xapi_vdi.mli +++ b/ocaml/xapi/xapi_vdi.mli @@ -87,7 +87,7 @@ val pool_introduce : -> physical_utilisation:int64 -> metadata_of_pool:[`pool] API.Ref.t -> is_a_snapshot:bool - -> snapshot_time:API.Date.iso8601 + -> snapshot_time:API.Date.t -> snapshot_of:[`VDI] API.Ref.t -> cbt_enabled:bool -> [`VDI] Ref.t @@ -110,7 +110,7 @@ val db_introduce : -> physical_utilisation:int64 -> metadata_of_pool:[`pool] API.Ref.t -> is_a_snapshot:bool - -> snapshot_time:API.Date.iso8601 + -> snapshot_time:API.Date.t -> snapshot_of:[`VDI] API.Ref.t -> cbt_enabled:bool -> [`VDI] Ref.t @@ -208,7 +208,7 @@ val set_snapshot_of : __context:Context.t -> self:[`VDI] API.Ref.t -> value:[`VDI] API.Ref.t -> unit val set_snapshot_time : - __context:Context.t -> self:[`VDI] API.Ref.t -> value:API.Date.iso8601 -> unit + __context:Context.t -> self:[`VDI] API.Ref.t -> value:API.Date.t -> unit val set_metadata_of_pool : __context:Context.t diff --git a/ocaml/xapi/xapi_vif_helpers.ml b/ocaml/xapi/xapi_vif_helpers.ml index 5b1f1f458f5..4a469b84368 100644 --- a/ocaml/xapi/xapi_vif_helpers.ml +++ b/ocaml/xapi/xapi_vif_helpers.ml @@ -54,13 +54,13 @@ let valid_operations ~__context record _ref' : table = debug "No operations are valid because current-operations = [ %s ]" (String.concat "; " (List.map - (fun (task, op) -> task ^ " -> " ^ vif_operation_to_string op) + (fun (task, op) -> task ^ " -> " ^ vif_operations_to_string op) current_ops ) ) ; let concurrent_op = snd (List.hd current_ops) in set_errors Api_errors.other_operation_in_progress - ["VIF"; _ref; vif_operation_to_string concurrent_op] + ["VIF"; _ref; vif_operations_to_string concurrent_op] all_ops ) ; (* No hotplug on dom0 *) @@ -85,8 +85,8 @@ let valid_operations ~__context record _ref' : table = | `Running, false -> set_errors Api_errors.device_already_detached [_ref] [`unplug] | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in + let expected = Record_util.vm_power_state_to_lowercase_string `Running in set_errors Api_errors.vm_bad_power_state [Ref.string_of vm; expected; actual] [`plug; `unplug] @@ -163,7 +163,7 @@ let throw_error (table : table) op = , [ Printf.sprintf "xapi_vif_helpers.assert_operation_valid unknown operation: %s" - (vif_operation_to_string op) + (vif_operations_to_string op) ] ) ) @@ -298,8 +298,7 @@ let create ~__context ~device ~network 
~vM ~mAC ~mTU ~other_config and metrics_uuid = Uuidx.to_string (Uuidx.make ()) in Db.VIF_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid ~io_read_kbs:0. ~io_write_kbs:0. - ~last_updated:(Xapi_stdext_date.Date.of_float 0.) - ~other_config:[] ; + ~last_updated:Xapi_stdext_date.Date.epoch ~other_config:[] ; let (_ : unit) = Db.VIF.create ~__context ~ref ~uuid:(Uuidx.to_string uuid) ~current_operations:[] ~allowed_operations:[] ~reserved:false ~device diff --git a/ocaml/xapi/xapi_vm.ml b/ocaml/xapi/xapi_vm.ml index eff46f84b93..3acd99a763e 100644 --- a/ocaml/xapi/xapi_vm.ml +++ b/ocaml/xapi/xapi_vm.ml @@ -411,8 +411,8 @@ let hard_reboot ~__context ~vm = ( Api_errors.vm_bad_power_state , [ Ref.string_of vm - ; Record_util.power_to_string `Running - ; Record_util.power_to_string `Suspended + ; Record_util.vm_power_state_to_lowercase_string `Running + ; Record_util.vm_power_state_to_lowercase_string `Suspended ] ) ) @@ -617,8 +617,8 @@ let create ~__context ~name_label ~name_description ~power_state ~user_version let current_domain_type = if suspended then domain_type else `unspecified in Db.VM_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid ~memory_actual:0L ~vCPUs_number:0L ~vCPUs_utilisation ~vCPUs_CPU:[] - ~vCPUs_params:[] ~vCPUs_flags:[] ~state:[] ~start_time:Date.never - ~install_time:Date.never ~last_updated:Date.never ~other_config:[] + ~vCPUs_params:[] ~vCPUs_flags:[] ~state:[] ~start_time:Date.epoch + ~install_time:Date.epoch ~last_updated:Date.epoch ~other_config:[] ~hvm:false ~nested_virt:false ~nomigrate:false ~current_domain_type ; let domain_type = if domain_type = `unspecified then @@ -643,8 +643,8 @@ let create ~__context ~name_label ~name_description ~power_state ~user_version ( vm_bad_power_state , [ Ref.string_of vm_ref - ; Record_util.power_to_string `Halted - ; Record_util.power_to_string power_state + ; Record_util.vm_power_state_to_lowercase_string `Halted + ; Record_util.vm_power_state_to_lowercase_string power_state ] ) ) ; @@ -652,7 +652,7 @@ let create ~__context ~name_label ~name_description ~power_state ~user_version ~power_state:_power_state ~allowed_operations:[] ~current_operations:[] ~blocked_operations:[] ~name_label ~name_description ~user_version ~is_a_template ~is_default_template:false ~transportable_snapshot_id:"" - ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null + ~is_a_snapshot:false ~snapshot_time:Date.epoch ~snapshot_of:Ref.null ~parent:Ref.null ~snapshot_info:[] ~snapshot_metadata:"" ~resident_on ~scheduled_to_be_resident_on ~affinity ~memory_overhead:0L ~memory_static_max ~memory_dynamic_max ~memory_target ~memory_dynamic_min @@ -1601,6 +1601,18 @@ let set_domain_type ~__context ~self ~value = Db.VM.set_HVM_boot_policy ~__context ~self ~value:(derive_hvm_boot_policy ~domain_type:value) +let set_blocked_operations ~__context ~self ~value = + debug "%s" __FUNCTION__ ; + Db.VM.set_blocked_operations ~__context ~self ~value + +let add_to_blocked_operations ~__context ~self ~key ~value = + debug "%s" __FUNCTION__ ; + Db.VM.add_to_blocked_operations ~__context ~self ~key ~value + +let remove_from_blocked_operations ~__context ~self ~key = + debug "%s" __FUNCTION__ ; + Db.VM.remove_from_blocked_operations ~__context ~self ~key + let set_HVM_boot_policy ~__context ~self ~value = Db.VM.set_domain_type ~__context ~self ~value:(derive_domain_type ~hVM_boot_policy:value) ; @@ -1627,8 +1639,8 @@ let restart_device_models ~__context ~self = ( vm_bad_power_state , [ Ref.string_of self - ; Record_util.power_state_to_string 
`Running - ; Record_util.power_state_to_string power_state + ; Record_util.vm_power_state_to_string `Running + ; Record_util.vm_power_state_to_string power_state ] ) ) ; diff --git a/ocaml/xapi/xapi_vm.mli b/ocaml/xapi/xapi_vm.mli index 19a737755e0..d0771c49cfa 100644 --- a/ocaml/xapi/xapi_vm.mli +++ b/ocaml/xapi/xapi_vm.mli @@ -428,3 +428,19 @@ val set_uefi_mode : val get_secureboot_readiness : __context:Context.t -> self:API.ref_VM -> API.vm_secureboot_readiness + +val set_blocked_operations : + __context:Context.t + -> self:API.ref_VM + -> value:(API.vm_operations * string) list + -> unit + +val add_to_blocked_operations : + __context:Context.t + -> self:API.ref_VM + -> key:API.vm_operations + -> value:string + -> unit + +val remove_from_blocked_operations : + __context:Context.t -> self:API.ref_VM -> key:API.vm_operations -> unit diff --git a/ocaml/xapi/xapi_vm_clone.ml b/ocaml/xapi/xapi_vm_clone.ml index 997dc5cfdb4..c2a5211d250 100644 --- a/ocaml/xapi/xapi_vm_clone.ml +++ b/ocaml/xapi/xapi_vm_clone.ml @@ -231,7 +231,9 @@ let quiesced = "quiesced" let snapshot_info ~power_state ~is_a_snapshot = let power_state_info = - [(power_state_at_snapshot, Record_util.power_state_to_string power_state)] + [ + (power_state_at_snapshot, Record_util.vm_power_state_to_string power_state) + ] in if is_a_snapshot then (disk_snapshot_type, crash_consistent) :: power_state_info @@ -341,9 +343,9 @@ let copy_vm_record ?snapshot_info_record ~__context ~vm ~disk_op ~new_name ~snapshot_of:(if is_a_snapshot then vm else Ref.null) ~snapshot_time: ( if is_a_snapshot then - Date.of_float (Unix.gettimeofday ()) + Date.now () else - Date.never + Date.epoch ) ~snapshot_info: ( match snapshot_info_record with diff --git a/ocaml/xapi/xapi_vm_clone.mli b/ocaml/xapi/xapi_vm_clone.mli index 7105a98106a..05843952fca 100644 --- a/ocaml/xapi/xapi_vm_clone.mli +++ b/ocaml/xapi/xapi_vm_clone.mli @@ -24,8 +24,7 @@ val disk_snapshot_type : string val quiesced : string val snapshot_info : - power_state: - [< `Halted | `Migrating | `Paused | `Running | `ShuttingDown | `Suspended] + power_state:[< `Halted | `Paused | `Running | `Suspended] -> is_a_snapshot:bool -> (string * string) list diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml index 88590dc195b..0387dee1952 100644 --- a/ocaml/xapi/xapi_vm_helpers.ml +++ b/ocaml/xapi/xapi_vm_helpers.ml @@ -79,9 +79,7 @@ let set_is_a_template ~__context ~self ~value = info "VM.set_is_a_template('%b')" value ; let m = Db.VM.get_metrics ~__context ~self in ( if not value then - try - Db.VM_metrics.set_install_time ~__context ~self:m - ~value:(Date.of_float (Unix.gettimeofday ())) + try Db.VM_metrics.set_install_time ~__context ~self:m ~value:(Date.now ()) with _ -> warn "Could not update VM install time because metrics object was missing" @@ -1413,19 +1411,19 @@ let copy_metrics ~__context ~vm = m ) ~start_time: - (Option.fold ~none:Date.never + (Option.fold ~none:Date.epoch ~some:(fun x -> x.Db_actions.vM_metrics_start_time) m ) ~install_time: - (Option.fold ~none:Date.never + (Option.fold ~none:Date.epoch ~some:(fun x -> x.Db_actions.vM_metrics_install_time) m ) ~state: (Option.fold ~none:[] ~some:(fun x -> x.Db_actions.vM_metrics_state) m) ~last_updated: - (Option.fold ~none:Date.never + (Option.fold ~none:Date.epoch ~some:(fun x -> x.Db_actions.vM_metrics_last_updated) m ) @@ -1461,6 +1459,7 @@ let copy_guest_metrics ~__context ~vm = Db.VM_guest_metrics.create ~__context ~ref ~uuid:(Uuidx.to_string (Uuidx.make ())) ~os_version:all.API.vM_guest_metrics_os_version 
+ ~netbios_name:all.API.vM_guest_metrics_netbios_name ~pV_drivers_version:all.API.vM_guest_metrics_PV_drivers_version ~pV_drivers_up_to_date:all.API.vM_guest_metrics_PV_drivers_up_to_date ~memory:all.API.vM_guest_metrics_memory diff --git a/ocaml/xapi/xapi_vm_lifecycle.ml b/ocaml/xapi/xapi_vm_lifecycle.ml index 2f6130641df..914cfd15e8a 100644 --- a/ocaml/xapi/xapi_vm_lifecycle.ml +++ b/ocaml/xapi/xapi_vm_lifecycle.ml @@ -262,9 +262,10 @@ let check_snapshot ~vmr:_ ~op ~ref_str = let report_power_state_error ~__context ~vmr ~power_state ~op ~ref_str = let expected = allowed_power_states ~__context ~vmr ~op in let expected = - String.concat ", " (List.map Record_util.power_to_string expected) + String.concat ", " + (List.map Record_util.vm_power_state_to_lowercase_string expected) in - let actual = Record_util.power_to_string power_state in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in Some (Api_errors.vm_bad_power_state, [ref_str; expected; actual]) let report_concurrent_operations_error ~current_ops ~ref_str = @@ -972,8 +973,9 @@ let assert_initial_power_state_in ~__context ~self ~allowed = ( Api_errors.vm_bad_power_state , [ Ref.string_of self - ; List.map Record_util.power_to_string allowed |> String.concat ";" - ; Record_util.power_to_string actual + ; List.map Record_util.vm_power_state_to_lowercase_string allowed + |> String.concat ";" + ; Record_util.vm_power_state_to_lowercase_string actual ] ) ) @@ -992,8 +994,9 @@ let assert_final_power_state_in ~__context ~self ~allowed = , [ "VM not in expected power state after completing operation" ; Ref.string_of self - ; List.map Record_util.power_to_string allowed |> String.concat ";" - ; Record_util.power_to_string actual + ; List.map Record_util.vm_power_state_to_lowercase_string allowed + |> String.concat ";" + ; Record_util.vm_power_state_to_lowercase_string actual ] ) ) diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index e57ef22fbad..677da6fe8f1 100644 --- a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -1370,10 +1370,10 @@ let migrate_send' ~__context ~vm ~dest ~live:_ ~vdi_map ~vif_map ~vgpu_map let r = Int64.compare v1.size v2.size in if r = 0 then let t1 = - Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) + Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) in let t2 = - Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) + Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) in compare t1 t2 else @@ -1805,8 +1805,8 @@ let assert_can_migrate ~__context ~vm ~dest ~live:_ ~vdi_map ~vif_map ~options ( Api_errors.vm_bad_power_state , [ Ref.string_of vm - ; Record_util.power_to_string `Halted - ; Record_util.power_to_string power_state + ; Record_util.vm_power_state_to_lowercase_string `Halted + ; Record_util.vm_power_state_to_lowercase_string power_state ] ) ) ; diff --git a/ocaml/xapi/xapi_vmss.ml b/ocaml/xapi/xapi_vmss.ml index 03badb83b60..d4a960cae81 100644 --- a/ocaml/xapi/xapi_vmss.ml +++ b/ocaml/xapi/xapi_vmss.ml @@ -228,7 +228,7 @@ let create ~__context ~name_label ~name_description ~enabled ~_type let uuid = Uuidx.to_string (Uuidx.make ()) in Db.VMSS.create ~__context ~ref ~uuid ~name_label ~name_description ~enabled ~_type ~retained_snapshots ~frequency ~schedule - ~last_run_time:(Xapi_stdext_date.Date.of_float 0.) 
; + ~last_run_time:Xapi_stdext_date.Date.epoch ; ref let destroy_all_messages ~__context ~self = diff --git a/ocaml/xapi/xapi_vusb_helpers.ml b/ocaml/xapi/xapi_vusb_helpers.ml index 4c8b8d5eb2a..2b9e0805865 100644 --- a/ocaml/xapi/xapi_vusb_helpers.ml +++ b/ocaml/xapi/xapi_vusb_helpers.ml @@ -52,13 +52,13 @@ let valid_operations ~__context record _ref' : table = debug "No operations are valid because current-operations = [ %s ]" (String.concat "; " (List.map - (fun (task, op) -> task ^ " -> " ^ vusb_operation_to_string op) + (fun (task, op) -> task ^ " -> " ^ vusb_operations_to_string op) current_ops ) ) ; let concurrent_op = snd (List.hd current_ops) in set_errors Api_errors.other_operation_in_progress - ["VUSB"; _ref; vusb_operation_to_string concurrent_op] + ["VUSB"; _ref; vusb_operations_to_string concurrent_op] all_ops ) ; let vm = Db.VUSB.get_VM ~__context ~self:_ref' in @@ -69,8 +69,8 @@ let valid_operations ~__context record _ref' : table = | `Running, false -> set_errors Api_errors.device_already_detached [_ref] [`unplug] | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in + let expected = Record_util.vm_power_state_to_lowercase_string `Running in set_errors Api_errors.vm_bad_power_state [Ref.string_of vm; expected; actual] [`plug; `unplug] @@ -101,7 +101,7 @@ let throw_error (table : table) op = Printf.sprintf "xapi_vusb_helpers.assert_operation_valid unknown operation: \ %s" - (vusb_operation_to_string op) + (vusb_operations_to_string op) ] ) ) diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index dfb2b666205..f50e692a555 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -44,8 +44,8 @@ let check_power_state_is ~__context ~self ~expected = if actual <> expected then warn "Potential problem: VM %s in power state '%s' when expecting '%s'" (Db.VM.get_uuid ~__context ~self) - (Record_util.power_to_string actual) - (Record_util.power_to_string expected) + (Record_util.vm_power_state_to_lowercase_string actual) + (Record_util.vm_power_state_to_lowercase_string expected) let event_wait queue_name dbg ?from p = let finished = ref false in @@ -2047,7 +2047,7 @@ let update_vm ~__context id = changed." ; should_update_allowed_operations := true ; debug "xenopsd event: Updating VM %s power_state <- %s" id - (Record_util.power_state_to_string power_state) ; + (Record_util.vm_power_state_to_string power_state) ; (* This will mark VBDs, VIFs as detached and clear resident_on if the VM has permanently shutdown. 
current-operations should not be reset as there maybe a checkpoint is ongoing*) @@ -2282,14 +2282,16 @@ let update_vm ~__context id = Option.iter (fun (_, state) -> let metrics = Db.VM.get_metrics ~__context ~self in - let start_time = Date.of_float state.Vm.last_start_time in + let start_time = + Date.of_unix_time state.Vm.last_start_time + in if start_time <> Db.VM_metrics.get_start_time ~__context ~self:metrics then ( debug "xenopsd event: Updating VM %s last_start_time <- %s" id - (Date.to_string (Date.of_float state.Vm.last_start_time)) ; + Date.(to_rfc3339 (of_unix_time state.Vm.last_start_time)) ; Db.VM_metrics.set_start_time ~__context ~self:metrics ~value:start_time ; if @@ -2313,8 +2315,8 @@ let update_vm ~__context id = "VM %s guest metrics update time (%s) < VM start time \ (%s): deleting" id - (Date.to_string update_time) - (Date.to_string start_time) ; + (Date.to_rfc3339 update_time) + (Date.to_rfc3339 start_time) ; Xapi_vm_helpers.delete_guest_metrics ~__context ~self ; check_guest_agent () ) @@ -3426,7 +3428,7 @@ let transform_xenops_exn ~__context ~vm queue_name f = | Bad_power_state (found, expected) -> let f x = xenapi_of_xenops_power_state (Some x) - |> Record_util.power_state_to_string + |> Record_util.vm_power_state_to_string in let found = f found and expected = f expected in reraise Api_errors.vm_bad_power_state diff --git a/ocaml/xcp-rrdd/dune b/ocaml/xcp-rrdd/dune new file mode 100644 index 00000000000..db0846cbfe3 --- /dev/null +++ b/ocaml/xcp-rrdd/dune @@ -0,0 +1 @@ +(data_only_dirs scripts bugtool-plugin) diff --git a/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py b/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py index e25e0ddf016..17f7c2398f4 100755 --- a/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py +++ b/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py @@ -1,18 +1,19 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -import rrdd, os +import os +import rrdd if __name__ == "__main__": - # Create a proxy for communicating with xcp-rrdd. - api = rrdd.API(plugin_id="host_mem") - while True: - # Wait until 0.5 seconds before xcp-rrdd is going to read the output file. - api.wait_until_next_reading(neg_shift=.5) - # Collect measurements. - cmd = "free -k | grep Mem | awk '{print $2, $3, $4}'" - vs = os.popen(cmd).read().strip().split() - # Tell the proxy which datasources should be exposed in this iteration. - api.set_datasource("used_mem", vs[1], min_val=0, max_val=vs[0], units="KB") - api.set_datasource("free_mem", vs[2], min_val=0, max_val=vs[0], units="KB") - # Write all required information into a file about to be read by xcp-rrdd. - api.update() + # Create a proxy for communicating with xcp-rrdd. + api = rrdd.API(plugin_id="host_mem") + while True: + # Wait until 0.5 seconds before xcp-rrdd is going to read the output file. + api.wait_until_next_reading(neg_shift=.5) + # Collect measurements. + cmd = "free -k | grep Mem | awk '{print $2, $3, $4}'" + vs = os.popen(cmd).read().strip().split() + # Tell the proxy which datasources should be exposed in this iteration. + api.set_datasource("used_mem", vs[1], min_val=0, max_val=vs[0], units="KB") + api.set_datasource("free_mem", vs[2], min_val=0, max_val=vs[0], units="KB") + # Write all required information into a file about to be read by xcp-rrdd. 
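+        # (update() serialises a "DATASOURCES" header with CRC32 checksums ahead
+        # of the packed data sources; see test_update in
+        # test_api_wait_until_next_reading.py below.)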
+        api.update()
diff --git a/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py b/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py
index 5ca9b897fad..a038513e230 100644
--- a/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py
+++ b/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py
@@ -1,7 +1,11 @@
 # Test: pytest -v -s ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py
 """Parametrized test exercising all conditions in rrdd.API.wait_until_next_reading()"""
+import json
 import socket
+from io import BytesIO
+from struct import pack, unpack
 from warnings import catch_warnings as import_without_warnings, simplefilter
+from zlib import crc32
 
 # Dependencies:
 # pip install pytest-mock
@@ -77,3 +81,114 @@ def test_api_getter_functions(api):
     api.path = "path"
     assert api.get_header() == "header"
     assert api.get_path() == "path"
+
+
+class MockDataSource:
+    """Mock class for testing the rrdd.API.update() method"""
+    def __init__(self, name, metadata, packed_data):
+        self.name = name
+        self.metadata = metadata
+        self.packed_data = packed_data
+
+    def pack_data(self):
+        """Simple substitute for the pack_data() method of the rrdd.DataSource class"""
+        return self.packed_data
+
+
+@pytest.mark.parametrize(
+    "data_sources, expected_metadata",
+    [
+        pytest.param(
+            [
+                MockDataSource("ds1", {"key1": "value1"}, b"\x00\x01"),
+                MockDataSource("ds2", {"key2": "value2"}, b"\x00\x02"),
+            ],
+            {"key1": "value1", "key2": "value2"},
+        ),
+        pytest.param(
+            [MockDataSource("ds1", {"key1": "value1"}, b"\x00\x01")],
+            {"key1": "value1"},
+        ),
+        pytest.param(
+            [],
+            {},
+        ),
+    ],
+)
+def test_update(
+    mocker,
+    data_sources,
+    expected_metadata,
+):
+    """Test the update() method of the rrdd.API class"""
+    # Arrange
+    def checksum(*args):
+        """Calculate the CRC32 checksum of the given arguments"""
+        return crc32(*args) & 0xFFFFFFFF
+
+    class MockAPI(rrdd.API):
+        """Mock API class to test the update() method"""
+        def __init__(self):  # pylint: disable=super-init-not-called
+            self.dest = BytesIO()
+            self.datasources = data_sources
+
+        def pack_data(self, ds: MockDataSource):
+            return ds.pack_data()
+
+    testee = MockAPI()
+    testee.deregister = mocker.Mock()
+    fixed_time = 1234567890
+    mocker.patch("time.time", return_value=fixed_time)
+
+    # Act
+    testee.update()
+
+    # Assert
+
+    # Read and unpack the header
+    testee.dest.seek(0)
+    # The header is 31 bytes long: the 11-byte magic "DATASOURCES" followed
+    # by 20 bytes of packed fields:
+    #  0-10: "DATASOURCES" (11 bytes)
+    # 11-14: data_checksum (4 bytes)
+    # 15-18: metadata_checksum (4 bytes)
+    # 19-22: num_datasources (4 bytes)
+    # 23-30: timestamp (8 bytes)
+    header_len = len("DATASOURCES") + 4 + 4 + 4 + 8
+    header = testee.dest.read(header_len)
+    (
+        unpacked_data_checksum,
+        unpacked_metadata_checksum,
+        unpacked_num_datasources,
+        unpacked_timestamp,
+    ) = unpack(">LLLQ", header[11:])
+
+    # Assert the expected unpacked header value
+    assert header.startswith(b"DATASOURCES")
+    assert unpacked_num_datasources == len(data_sources)
+    assert unpacked_timestamp == fixed_time
+
+    #
+    # Assert datasources and the expected data checksum
+    #
+
+    # Initialize the expected checksum with the fixed time
+    expected_checksum = checksum(pack(">Q", fixed_time))
+    # Loop over the datasources and assert the packed data
+    testee.dest.seek(header_len)
+    # sourcery skip: no-loop-in-tests
+    for ds in data_sources:
+        packed_data = testee.dest.read(len(ds.pack_data()))
+        assert packed_data == ds.pack_data()
+        # Update the checksum with the packed data
+        expected_checksum =
checksum(packed_data, expected_checksum) + + assert unpacked_data_checksum == expected_checksum + + # + # Assert metadata and the expected metadata checksum + # + metadata_length = unpack(">L", testee.dest.read(4))[0] + metadata_json = testee.dest.read(metadata_length) + + assert json.loads(metadata_json) == {"datasources": expected_metadata} + assert unpacked_metadata_checksum == checksum(metadata_json) diff --git a/ocaml/xe-cli/bash-completion b/ocaml/xe-cli/bash-completion index 0cf725eb76c..0c29a5446b9 100644 --- a/ocaml/xe-cli/bash-completion +++ b/ocaml/xe-cli/bash-completion @@ -617,7 +617,9 @@ _xe() __xe_debug "triggering autocompletion for parameter names, param is '$param'" IFS=$'\n,' - REQD_OPTIONAL_PARAMS=1 + if [ ! "$param" ]; then + REQD_OPTIONAL_PARAMS=1 + fi get_params_for_command "${OLDSTYLE_WORDS[1]}" # Don't suggest already provided parameters @@ -812,15 +814,14 @@ set_completions() if [[ $REQD_OPTIONAL_PARAMS == 1 ]]; then local reqd_params=$( __preprocess_suggestions "$REQD_PARAMS" ) local opt_params=$( __preprocess_suggestions "$OPT_PARAMS" ) + if [[ "$excludes" ]]; then + reqd_params=$(echo "$reqd_params" | eval "grep -v $excludes") + opt_params=$(echo "$opt_params" | eval "grep -v $excludes") + fi if [[ "$reqd_params" && "$opt_params" ]]; then __xe_debug "showing optional/required parameters" SHOW_DESCRIPTION=1 - if [[ "$excludes" ]]; then - reqd_params=$(echo "$reqd_params" | eval "grep -v $excludes") - opt_params=$(echo "$opt_params" | eval "grep -v $excludes") - fi - for word in $reqd_params; do __add_completion "$word" "REQUIRED" "$max_cmd_length" done diff --git a/ocaml/xenopsd/cli/dune b/ocaml/xenopsd/cli/dune index 0b2e0f0c2cf..f4cf59242c1 100644 --- a/ocaml/xenopsd/cli/dune +++ b/ocaml/xenopsd/cli/dune @@ -8,7 +8,7 @@ (libraries astring cmdliner - + re result rpclib.core @@ -22,6 +22,7 @@ xapi-idl.xen.interface xapi-idl.xen.interface.types xapi-stdext-pervasives + yojson ) (preprocess (per_module ((pps ppx_deriving_rpc) Common Xn_cfg_types))) ) diff --git a/ocaml/xenopsd/cli/xn.ml b/ocaml/xenopsd/cli/xn.ml index 9658650699f..811b004bdc3 100644 --- a/ocaml/xenopsd/cli/xn.ml +++ b/ocaml/xenopsd/cli/xn.ml @@ -701,70 +701,10 @@ let list_compact () = let list copts = diagnose_error (if copts.Common.verbose then list_verbose else list_compact) -type t = Line of string | Block of t list - -let pp x = - let open Rpc in - let rec to_string_list = function - | Line x -> - [x] - | Block xs -> - let xs' = List.map to_string_list xs |> List.concat in - List.map (fun x -> " " ^ x) xs' - in - let flatten xs = - let rec aux line = function - | Line x :: xs -> - aux (if line <> "" then line ^ " " ^ x else x) xs - | Block x :: xs -> - (if line <> "" then [Line line] else []) - @ [Block (aux "" x)] - @ aux "" xs - | [] -> - if line <> "" then [Line line] else [] - in - aux "" xs - in - let rec to_t = function - | Int32 x -> - [Line (Printf.sprintf "%d" (Int32.to_int x))] - | Int x -> - [Line (Printf.sprintf "%Ld" x)] - | Bool x -> - [Line (Printf.sprintf "%b" x)] - | Float x -> - [Line (Printf.sprintf "%g" x)] - | String x -> - [Line x] - | DateTime x -> - [Line x] - | Enum [] -> - [Line "[]"] - | Enum xs -> - [Line "["; Block (List.concat (List.map to_t xs)); Line "]"] - | Dict [] -> - [Line "{}"] - | Dict xs -> - [ - Line "{" - ; Block - (List.concat (List.map (fun (s, t) -> Line (s ^ ": ") :: to_t t) xs)) - ; Line "}" - ] - | Base64 x -> - [Line x] - | Null -> - [] - in - x - |> to_t - |> flatten - |> List.map to_string_list - |> List.concat - |> List.iter (Printf.printf 
"%s\n") - let diagnostics' () = - Client.get_diagnostics dbg () |> Jsonrpc.of_string |> pp ; + Client.get_diagnostics dbg () + |> Yojson.Safe.prettify ~std:true + |> print_endline ; `Ok () let stat_vm _ id = diff --git a/ocaml/xenopsd/dune b/ocaml/xenopsd/dune new file mode 100644 index 00000000000..389b982cc01 --- /dev/null +++ b/ocaml/xenopsd/dune @@ -0,0 +1 @@ +(data_only_dirs scripts) diff --git a/ocaml/xenopsd/lib/suspend_image.ml b/ocaml/xenopsd/lib/suspend_image.ml index 029224e7b06..e08cb53c268 100644 --- a/ocaml/xenopsd/lib/suspend_image.ml +++ b/ocaml/xenopsd/lib/suspend_image.ml @@ -42,7 +42,7 @@ module Xenops_record = struct [@@deriving sexp] let make ?vm_str ?xs_subtree () = - let time = Xapi_stdext_date.Date.(to_string (now ())) in + let time = Xapi_stdext_date.Date.(to_rfc3339 (now ())) in let word_size = Sys.word_size in {word_size; time; vm_str; xs_subtree} diff --git a/ocaml/xenopsd/lib/xenops_server.ml b/ocaml/xenopsd/lib/xenops_server.ml index 71ad563ed19..e65b929e1f4 100644 --- a/ocaml/xenopsd/lib/xenops_server.ml +++ b/ocaml/xenopsd/lib/xenops_server.ml @@ -1152,7 +1152,7 @@ module WorkerPool = struct let t' = Xenops_task.to_interface_task t in { id= t'.Task.id - ; ctime= t'.Task.ctime |> Date.of_float |> Date.to_string + ; ctime= t'.Task.ctime |> Date.of_unix_time |> Date.to_rfc3339 ; dbg= t'.Task.dbg ; subtasks= List.map diff --git a/ocaml/xenopsd/scripts/common.py b/ocaml/xenopsd/scripts/common.py index af8666ce62c..26adb00c0d2 100755 --- a/ocaml/xenopsd/scripts/common.py +++ b/ocaml/xenopsd/scripts/common.py @@ -154,18 +154,7 @@ def get_bridge(self): return network[1] def get_address(self): return "fe:ff:ff:ff:ff:ff" - def get_ethtool(self): - results = [] - for (k, v) in self.json["other_config"]: - if k.startswith("ethtool-"): - k = k[len("ethtool-"):] - if v == "true" or v == "on": - results.append(k, True) - elif v == "false" or v == "off": - results.append(k, False) - else: - send_to_syslog("VIF %s/%d: ignoring ethtool argument %s=%s (use true/false)" % (self.vm_uuid, self.devid, k, v)) - return results + def get_mac(self): return self.json["mac"] def get_mtu(self): @@ -192,28 +181,33 @@ def get_external_ids(self): results["xs-network-uuid"] = self.json["extra_private_keys"]["network-uuid"] results["attached-mac"] = self.get_mac() return results + def get_locking_mode(self): - def get_words(value, separator): - if string.strip(value) == "": - return [] - else: - return string.split(value, separator) + """ + Get the locking mode configuration for the VIF. 
+ + :returns dict: A dictionary containing the locking mode configuration with keys: + - mac: The MAC address + - locking_mode: The locking mode + - ipv4_allowed: List of IPv4 addresses allowed + - ipv6_allowed: List of IPv6 addresses allowed + """ results = { "mac": self.get_mac(), "locking_mode": "", "ipv4_allowed": [], - "ipv6_allowed": [] + "ipv6_allowed": [], } if "locking_mode" in self.json: - if type(self.json["locking_mode"]) is list: - # Must be type=locked here + if isinstance(self.json["locking_mode"], list): + # Must be type=locked and have keys for allowed ipv4 and ipv6 addresses results["locking_mode"] = self.json["locking_mode"][0].lower() - locked_params=self.json["locking_mode"][1] + locked_params = self.json["locking_mode"][1] results["ipv4_allowed"] = locked_params["ipv4"] results["ipv6_allowed"] = locked_params["ipv6"] else: results["locking_mode"] = self.json["locking_mode"].lower() - send_to_syslog("Got locking config: %s" % (repr(results))) + send_to_syslog("Got locking config: " + repr(results)) return results class Interface: @@ -223,17 +217,3 @@ def __init__(self, vif_name, uuid, devid): self.vif = VIF(vif_name, uuid, int(devid)) def get_vif(self): return self.vif - def online(self): - v = self.get_vif() - mode = v.get_mode() - for (key, value) in v.get_ethtool(): - set_ethtool(mode, self.name, key, value) - set_mtu(mode, self.name, v.get_mtu()) - add_to_bridge(mode, self.name, v.get_bridge(), v.get_address(), v.get_external_ids()) - add_vif_rules(self.name) - set_promiscuous(mode, self.name, v.get_promiscuous()) - -#def add(mode, dev, bridge, address, external_ids): -# add_to_bridge(mode, dev, bridge, address, external_ids) - - diff --git a/ocaml/xenopsd/scripts/make-custom-xenopsd.conf b/ocaml/xenopsd/scripts/make-custom-xenopsd.conf index b49610f0e9a..59f52269157 100755 --- a/ocaml/xenopsd/scripts/make-custom-xenopsd.conf +++ b/ocaml/xenopsd/scripts/make-custom-xenopsd.conf @@ -41,7 +41,6 @@ vif-script=${XENOPSD_LIBEXECDIR}/vif vif-xl-script=${XENOPSD_LIBEXECDIR}/vif vbd-script=${XENOPSD_LIBEXECDIR}/block vbd-xl-script=${XENOPSD_LIBEXECDIR}/block -qemu-vif-script=${XENOPSD_LIBEXECDIR}/qemu-vif-script setup-vif-rules=${XENOPSD_LIBEXECDIR}/setup-vif-rules sockets-group=$group qemu-wrapper=${QEMU_WRAPPER_DIR}/qemu-wrapper diff --git a/ocaml/xenopsd/scripts/qemu-vif-script b/ocaml/xenopsd/scripts/qemu-vif-script deleted file mode 100755 index a8fe976e3a1..00000000000 --- a/ocaml/xenopsd/scripts/qemu-vif-script +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 - - -from common import * -import sys - -if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage:", file=sys.stderr) - print(" %s " % sys.argv[0], file=sys.stderr) - sys.exit(1) - name = sys.argv[1] - send_to_syslog("setting up interface %s" % name) - i = Interface(name) - i.online() diff --git a/ocaml/xenopsd/scripts/test_common_class_vif.py b/ocaml/xenopsd/scripts/test_common_class_vif.py new file mode 100644 index 00000000000..006d1966f6d --- /dev/null +++ b/ocaml/xenopsd/scripts/test_common_class_vif.py @@ -0,0 +1,78 @@ +"""Test ocaml/xenopsd/scripts/common.VIF.get_locking_mode()""" + +from unittest.mock import patch # to check the arguments passed to send_to_syslog() + +import pytest # for pytest.parametrize to run the same test with different parameters + +import common # Tested module + + +# Mock class to simulate the object containing the get_locking_mode method +class VifMockSubclass(common.VIF): + """Mock class to simulate a VIF object containing the get_locking_mode method""" + + def 
__init__(self, json): # pylint: disable=super-init-not-called + """Do not call the parent constructor, it would open a file""" + self.json = json + + def get_mac(self): + return "00:11:22:33:44:55" # Expected MAC address + + +@pytest.mark.parametrize( + # Call the test case 3 times with two args: + # inp: input for VIF.get_locking_mode() + # expected_output: expected output of the get_locking_mode method + # Asserted with: + # assert expected_output == get_locking_mode(input) + "input_params, expected_output", + [ + # Happy path tests + ( + # locked + { # input + "locking_mode": [ + "locked", + {"ipv4": ["1.1.1.1"], "ipv6": ["fe80::1"]}, + ] + }, # expected output + { + "mac": "00:11:22:33:44:55", + "locking_mode": "locked", + "ipv4_allowed": ["1.1.1.1"], + "ipv6_allowed": ["fe80::1"], + }, + ), + ( + # unlocked + {"locking_mode": "unlocked"}, + { + "mac": "00:11:22:33:44:55", + "locking_mode": "unlocked", + "ipv4_allowed": [], + "ipv6_allowed": [], + }, + ), + ( + {}, # no locking_mode + { + "mac": "00:11:22:33:44:55", + "locking_mode": "", + "ipv4_allowed": [], + "ipv6_allowed": [], + }, + ), + ], +) +def test_get_locking_mode(input_params, expected_output): + """Test VIF.get_locking_mode() using the VIF class test parameters defined above.""" + + # Act: Get the locking mode configuration for the input params from the VIF object: + with patch("common.send_to_syslog") as send_to_syslog: + test_result = VifMockSubclass(input_params).get_locking_mode() + + # Assert the expected output and the expected call to send_to_syslog(): + assert test_result == expected_output + send_to_syslog.assert_called_once_with( + "Got locking config: " + repr(expected_output) + ) diff --git a/ocaml/xenopsd/xc/dune b/ocaml/xenopsd/xc/dune index 4a79452dbbe..b841da23fbc 100644 --- a/ocaml/xenopsd/xc/dune +++ b/ocaml/xenopsd/xc/dune @@ -25,6 +25,9 @@ rpclib.core rpclib.json rresult + rrdd-plugin + rrdd-plugin.base + rrdd-plugin.local sexplib0 qmp threads.posix diff --git a/ocaml/xenopsd/xc/mem_stats.ml b/ocaml/xenopsd/xc/mem_stats.ml new file mode 100644 index 00000000000..9e01d14473e --- /dev/null +++ b/ocaml/xenopsd/xc/mem_stats.ml @@ -0,0 +1,337 @@ +(* + * Copyright (C) Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +module D = Debug.Make (struct let name = "mem_stats" end) + +module Compat = struct + let mtime_clock_counter = Mtime_clock.counter + + let mtime_clock_count = Mtime_clock.count + + let mtime_span_to_s f = Mtime.Span.to_float_ns f *. 1e-9 + + let file_lines_fold = Xapi_stdext_unix.Unixext.file_lines_fold + + let reporter_async ~shared_page_count ~dss_f = + let open Rrdd_plugin in + Reporter.start_async + (module D : Debug.DEBUG) + ~uid:"mem-stats" ~neg_shift:0.5 ~target:(Reporter.Local shared_page_count) + ~protocol:Rrd_interface.V2 ~dss_f +end + +open Compat + +module SlowReporter = struct + (** [report ~interval_s ~generate_dss] calls [generate_dss] every [interval_s] only, + and substitutes the previous value when called more often. 
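+      (e.g. with the [~interval_s:150.] used by [generate_expensive_stats]
+      below and the 5s reporting loop, [generate_dss] runs on roughly every
+      30th call; the cached datasources are returned in between.)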
+      Using VT_Unknown or NaN would leave gaps in the graph.
+      Reporter.Local only supports reporting at 5s intervals, but some metrics
+      are too costly to gather that often, e.g. Gc.stat needs to walk the
+      entire heap.
+  *)
+  let report ~interval_s ~generate_dss state =
+    match state with
+    | Some (t0, dss) when mtime_clock_count t0 |> mtime_span_to_s < interval_s
+      ->
+        (state, (false, dss))
+    | None | Some _ ->
+        let dss = generate_dss () in
+        let next_state = Some (mtime_clock_counter (), dss) in
+        (next_state, (true, dss))
+
+  let iter_of_fold f =
+    let last = ref None in
+    fun () ->
+      let next, r = f !last in
+      (* to make reasoning about metrics easier, this is the only place
+         that contains explicit mutation in this file
+         (other than hashtbl construction on startup)
+      *)
+      last := next ;
+      r
+end
+
+module SimpleMetrics = struct
+  (* metric definitions *)
+
+  (* Caveats:
+     * do not use ~transform flag to ds_make: it is lost during rrd-transport with no warning!
+     * writing a VT_Unknown causes an exception and prevents other values from being written too
+     * the way to write an unknown value is to write something out of min/max range, which then gets
+       converted to nan upon reading (or write nan if the type is float, don't do this if type is int
+       since it causes a metadata change)
+     * use Sys.word_size, and not hard-coded word-to-kib conversion, the old code still had the
+       values for a 32-bit word
+     * the value type (int64 vs float) is cached by the transport, changing it causes a metadata
+       change
+     * it is best to always write out the same Rrd list, even if some values are missing, otherwise
+       the metadata would have to get reparsed (crc changed). But that doesn't actually work that way
+       because the metadata also includes the values (they are changed to uninitialized_ds only upon
+       read), thus any change in value triggers a metadata crc change, and a metadata reparse! Still,
+       let's assume this will be fixed and avoid needless metadata changes!
+     * everything gets converted to a float in the end: int64 type just has some more precision prior
+       to delta calculation. However for our purposes here the 'float' type (which is double precision)
+       has 53 bits of precision, which is more than enough for KiB - it results in 63 bits of precision
+       in terms of bytes (including sign)
+  *)
+
+  let ds_int64 i = Rrd.VT_Int64 i
+
+  let ds_float f = Rrd.VT_Float f
+
+  let ds_update ds v = {ds with Ds.ds_value= v}
+
+  let executable = Filename.basename Sys.executable_name
+
+  let ds_name name =
+    Printf.sprintf "%s_%s" executable (Astring.String.Ascii.lowercase name)
+
+  let ds_description desc = Printf.sprintf desc executable
+
+  let to_float ds =
+    match ds.Ds.ds_value with
+    | Rrd.VT_Int64 i ->
+        if i >= 0L then Int64.to_float i else nan
+    | Rrd.VT_Float f ->
+        f
+    | Rrd.VT_Unknown ->
+        nan
+
+  let define_unit ~ty ~units ~min ?(default = true) name description =
+    let name = ds_name name in
+    let description = ds_description description in
+    fun value ->
+      Ds.ds_make ~name ~description ~value ~ty ~default ~units ~min ()
+
+  let kib ?default ?(min = 0.0) v =
+    define_unit ~ty:Rrd.Gauge ~units:"KiB" ~min ?default v
+
+  let kib_per_s = define_unit ~ty:Rrd.Derive ~units:"KiB/s" ~min:0.0
+
+  let count = define_unit ~ty:Rrd.Gauge ~units:"" ~min:0.0
+
+  let words_to_kib w = w *. float (Sys.word_size / 8) /. 1024. |> ds_float
+end
+
+module Proc = struct
+  module KV = Astring.String.Map
+
+  (** [parse_value_count] parses ' N'.
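+      e.g. [parse_value_count " 42"] returns [42L].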
+  *)
+  let parse_value_count s = Scanf.sscanf s " %Lu" Fun.id
+
+  (** [parse_value_kib s] parses values of the form ' N kB'. *)
+  let parse_value_kib s = Scanf.sscanf s " %Lu kB" Fun.id
+
+  let file_lines_filter_map f ~path =
+    let fold acc line =
+      match f line with None -> acc | Some (k, v) -> KV.add k v acc
+    in
+    file_lines_fold fold KV.empty path
+
+  open SimpleMetrics
+
+  let parse_keyvalue fields ~path () =
+    let parse_pairs = function
+      | None ->
+          None
+      | Some (key, value) -> (
+          match KV.find key fields with
+          | None ->
+              None
+          | Some (parse_value, ds) ->
+              let v = parse_value value in
+              Some (key, ds_update ds @@ ds_int64 v)
+        )
+    in
+    file_lines_filter_map ~path @@ fun line ->
+    line |> Astring.String.cut ~sep:":" |> parse_pairs
+
+  let define_fields ~path l =
+    let kv =
+      ListLabels.fold_left l ~init:KV.empty ~f:(fun acc (key, ds) ->
+          KV.add key ds acc
+      )
+    in
+    parse_keyvalue kv ~path:(Filename.concat "/proc/self" path)
+
+  let unknown = ds_int64 (-1L)
+
+  let kib ?default key desc =
+    (key, (parse_value_kib, kib ?default key desc unknown))
+
+  let count ?default key desc =
+    (key, (parse_value_count, count ?default key desc unknown))
+
+  module Fields = struct
+    let vmdata = "VmData"
+
+    let vmpte = "VmPTE"
+
+    let threads = "Threads"
+
+    let fdsize = "FDSize"
+
+    let vmsize = "VmSize"
+
+    let vmlck = "VmLck"
+
+    let vmpin = "VmPin"
+
+    let vmstk = "VmStk"
+
+    let rss = "Rss"
+
+    (* there is also /proc/self/stat and /proc/self/statm, but we'd need to open and parse both *)
+    let status =
+      define_fields ~path:"status"
+        [
+          count threads "Total number of threads used by %s"
+        ; count fdsize "Total number of file descriptors used by %s"
+        ; kib vmsize "Total amount of memory mapped by %s"
+        ; kib vmlck "Total amount of memory locked by %s"
+        ; kib vmpin "Total amount of memory pinned by %s"
+          (* VmRSS is inaccurate according to latest proc(5) *)
+        ; kib vmdata
+            "Total amount of writable, non-shared and non-stack memory used by \
+             %s"
+        ; kib vmstk "Total amount of main stack memory used by %s"
+        ; kib vmpte "Total amount of page table entry memory used by %s"
+        ]
+
+    (* According to latest proc(5) these are slower, but provide more accurate information.
+       The RSS reported by other stat counters could be off depending on the number of threads. *)
+    let smaps_rollup =
+      define_fields ~path:"smaps_rollup"
+        [kib rss "Total amount of resident memory used by %s"]
+  end
+
+  let find key kv = to_float (KV.get key kv)
+
+  let to_list kv = KV.bindings kv |> List.rev_map snd
+end
+
+module GcStat = struct
+  open SimpleMetrics
+  open Gc
+
+  let ocaml_total =
+    let field = kib "ocaml_total" "Total OCaml memory used by %s" in
+    fun gc control ->
+      gc.heap_words + control.minor_heap_size |> float |> words_to_kib |> field
+
+  let maybe_words name description v =
+    (* quick_stat would return a value of 0, which is not valid *)
+    v |> float |> words_to_kib |> kib ~min:0.001 name description
+
+  let memory_allocation_precise (gc, control) =
+    [
+      ocaml_total gc control
+    ; gc.minor_words +. gc.major_words -.
gc.promoted_words + |> words_to_kib + |> kib_per_s "ocaml_allocation_rate" + "Amount of allocations done by OCaml in the given period by %s" + ] + + let memory_allocation_approx_expensive (gc, _control) = + [ + (* see https://github.com/ocaml/ocaml/blob/trunk/stdlib/gc.mli#L50-L59, without running a major + cycle the live_words may overestimate the actual live words, "live" just means "not currently + known to be collectible"*) + gc.live_words + |> maybe_words "ocaml_maybe_live" + "OCaml memory not currently known to be collectable by %s" + ; gc.free_words |> maybe_words "ocaml_free" "OCaml memory available to %s" + ] +end + +module Derived = struct + open SimpleMetrics + + let memextra_kib = + kib "mem_extra" "Total amount of non-OCaml and non-stack memory used by %s" + + let ulimit_stack = + let ic = Unix.open_process_in "ulimit -s" in + let r = ic |> input_line |> Int64.of_string in + close_in_noerr ic ; Int64.to_float r + + let memextra_kib stats (gc_stat, control) = + let ocaml_total_kib = GcStat.ocaml_total gc_stat control |> to_float in + let vmdata = Proc.find Proc.Fields.vmdata stats in + let vmpte = Proc.find Proc.Fields.vmpte stats in + let threads = Proc.find Proc.Fields.threads stats in + (* Each thread, except the main one will allocate 'ulimit -s' worth of VmData. + This won't immediately show up in VmRss until actually used (e.g. by a deep calltree due to recursion) *) + vmdata -. vmpte -. (ulimit_stack *. (threads -. 1.)) -. ocaml_total_kib + |> ds_float + |> memextra_kib +end + +let observe_stats l = + let names = ListLabels.rev_map l ~f:(fun ds -> ds.Ds.ds_name) in + let values = + ListLabels.rev_map l ~f:(fun ds -> + let f = + match ds.Ds.ds_value with + | Rrd.VT_Int64 i -> + Int64.to_float i + | Rrd.VT_Float f -> + f + | Rrd.VT_Unknown -> + nan + in + ds.Ds.ds_pdp_transform_function f |> Printf.sprintf "%.0f" + ) + in + D.debug "stats header: %s" (String.concat "," names) ; + D.debug "stats values: %s" (String.concat "," values) + +let generate_expensive_stats = + let generate_dss () = + let stat = Gc.stat () in + let gc_control = Gc.get () in + let rss = Proc.Fields.smaps_rollup () |> Proc.to_list in + let gcstat = GcStat.memory_allocation_approx_expensive (stat, gc_control) in + List.rev_append rss gcstat + in + SlowReporter.iter_of_fold (SlowReporter.report ~interval_s:150. ~generate_dss) + +let generate_stats_exn () = + let status = Proc.Fields.status () in + let gc_stat = Gc.quick_stat () in + let gc_control = Gc.get () in + let derived = Derived.memextra_kib status (gc_stat, gc_control) in + let gcstat = GcStat.memory_allocation_precise (gc_stat, gc_control) in + let is_slow, slow_stats = generate_expensive_stats () in + let stats = + derived :: List.concat [gcstat; Proc.to_list status; slow_stats] + in + if is_slow then + observe_stats stats ; + stats |> List.rev_map (fun x -> (Rrd.Host, x)) + +let generate_stats () = + try generate_stats_exn () + with e -> + D.log_backtrace () ; + D.debug "Failed to generate stats: %s" (Printexc.to_string e) ; + [] + +(* xapi currently exports 5 datasources if a slave or 7 if a master; this + * comfortably fits into a single page. *) +let shared_page_count = 1 + +let start () = reporter_async ~shared_page_count ~dss_f:generate_stats + +let stop reporter = Rrdd_plugin.Reporter.cancel ~reporter diff --git a/ocaml/xenopsd/xc/mem_stats.mli b/ocaml/xenopsd/xc/mem_stats.mli new file mode 100644 index 00000000000..d42fc1397c5 --- /dev/null +++ b/ocaml/xenopsd/xc/mem_stats.mli @@ -0,0 +1,22 @@ +(* + * Copyright (C) Citrix Systems Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +val start : unit -> Rrdd_plugin.Reporter.t +(** Start a thread which will act as an RRDD plugin, and report interesting + stats to RRDD. *) + +val stop : Rrdd_plugin.Reporter.t -> unit +(** Stop the stats reporting thread. *) + +val generate_stats : unit -> (Rrd.ds_owner * Ds.ds) list diff --git a/ocaml/xenopsd/xc/memory_breakdown.ml b/ocaml/xenopsd/xc/memory_breakdown.ml index fae014a6ce3..54e739fe9aa 100644 --- a/ocaml/xenopsd/xc/memory_breakdown.ml +++ b/ocaml/xenopsd/xc/memory_breakdown.ml @@ -109,7 +109,7 @@ let xs_read_bytes_from_kib_key xs path = (** {2 Host fields} *) -let host_time _ = Date.to_string (Date.of_float (Unix.gettimeofday ())) +let host_time _ = Date.(to_rfc3339 (now ())) let host_total_bytes h = Int64.to_string diff --git a/ocaml/xenopsd/xc/memory_summary.ml b/ocaml/xenopsd/xc/memory_summary.ml index c63e495ccb4..e2abb80264d 100644 --- a/ocaml/xenopsd/xc/memory_summary.ml +++ b/ocaml/xenopsd/xc/memory_summary.ml @@ -55,7 +55,7 @@ let _ = in if not !hash then ( Printf.printf "%s %Ld %Ld" - (Date.to_string (Date.of_float (Unix.gettimeofday ()))) + Date.(to_rfc3339 (now ())) (total_pages ** one_page) (free_pages ** one_page) ; let domains = List.stable_sort (fun (a, _) (b, _) -> compare a b) domains diff --git a/ocaml/xenopsd/xc/xenops_xc_main.ml b/ocaml/xenopsd/xc/xenops_xc_main.ml index b7fce8d0b65..b49f8f0f6d3 100644 --- a/ocaml/xenopsd/xc/xenops_xc_main.ml +++ b/ocaml/xenopsd/xc/xenops_xc_main.ml @@ -57,4 +57,6 @@ let _ = ~specific_nonessential_paths:Xc_resources.nonessentials () ; check_domain0_uuid () ; make_var_run_xen () ; + let reporter = Mem_stats.start () in + at_exit (fun () -> Mem_stats.stop reporter) ; Xenopsd.main (module Xenops_server_xen : Xenops_server_plugin.S) diff --git a/ocaml/xenopsd/xenopsd.conf b/ocaml/xenopsd/xenopsd.conf index 94fcafefbd0..e80194c1f55 100644 --- a/ocaml/xenopsd/xenopsd.conf +++ b/ocaml/xenopsd/xenopsd.conf @@ -61,9 +61,6 @@ disable-logging-for=http tracing tracing_export # Path to the vbd backend script # vbd-xl-script=/usr/lib/xcp/scripts/block -# Path to the qemu vif script -# qemu-vif-script=/etc/xcp/scripts/qemu-vif-script - # Path to the PCI FLR script # pci-flr-script=/opt/xensource/libexec/pci-flr diff --git a/pyproject.toml b/pyproject.toml index d7bb0d7d1a7..512eac89030 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ # https://packaging.python.org/en/latest/specifications/pyproject-toml/ [project] name = "xen-api" -requires-python = ">=3.6.*" +requires-python = ">=3.6.0" license = {file = "LICENSE"} keywords = ["xen-project", "Xen", "hypervisor", "libraries"] maintainers = [ @@ -30,15 +30,36 @@ line-length = 88 # ----------------------------------------------------------------------------- # Coverage.py - https://coverage.readthedocs.io/en/coverage-5.5/config.html +# +# [tool.coverage.run] and [tool.coverage.report] configure these commands: +# coverage run && coverage report +# +# These work in conjunction with [tool.pytest.ini_options] to set defaults +# for 
running pytest (on its own) and for running Coverage.py with pytest:
+#
+# Examples for Python test development with Coverage.py:
+#
+# Run the default tests and check coverage:
+# coverage run && coverage report
+#
+# Run a custom set of tests and check coverage:
+# coverage run -m pytest python3/tests/test_*.py && coverage report
 # -----------------------------------------------------------------------------
 
 [tool.coverage.report]
 # Here, developers can configure which lines do not need to be covered by tests:
+# fail_under: minimum code coverage percentage
+fail_under = 50
+# exclude_lines: lines that are not required to be covered
 exclude_lines = [
     "pragma: no cover",   # standard pragma for not covering a line or block
     "if TYPE_CHECKING:",  # imports for type checking only
     "pass",
     # Other specific lines that do not need to be covered, comment in which file:
+    "raise NbdDeviceNotFound",       # python3/libexec/usb_scan.py
+    "params = xmlrpc.client.loads",  # static-vdis
+    "assert.*# must not be None",    # static-vdis
+    "except Exception:",             # static-vdis
 ]
 # precision digits to use when reporting coverage (sub-percent-digits are not reported):
 precision = 0
@@ -48,7 +69,8 @@ skip_covered = true
 
 [tool.coverage.run]
 # Default command line for "coverage run": Run pytest in non-verbose mode
-command_line = "-m pytest -p no:logging -p no:warnings"
+command_line = "-m pytest -v -ra"
+
 # Default data file for "coverage run": Store coverage data in .git/.coverage
 data_file = ".git/.coverage"
 # Default context for "coverage run": Use the name of the test function
@@ -71,7 +93,7 @@ relative_files = true
 
 # Default output when writing "coverage xml" data. This needs to match what
 # diff-cover and coverage upload to Codecov expect
 [tool.coverage.xml]
-output = ".git/coverage.xml"
+output = ".git/coverage3.11.xml"
 
 
 # Default output directory for writing "coverage html" data.
@@ -88,7 +110,6 @@ profile = "black"
 combine_as_imports = true
 ensure_newline_before_comments = false
 
-
 # -----------------------------------------------------------------------------
 # Mypy static analysis - https://mypy.readthedocs.io/en/stable/config_file.html
 # -----------------------------------------------------------------------------
@@ -98,10 +119,19 @@ ensure_newline_before_comments = false
 # PYTHONPATH="scripts/examples/python:.:scripts:scripts/plugins:scripts/examples"
 files = [
     "python3",
-    "scripts/usb_reset.py",
+    "scripts/examples/python",
+]
+exclude = [
+    "python3/packages",
+    "python3/stubs",
+    "python3/tests",
 ]
 pretty = true
+mypy_path = "python3/packages:python3/stubs:scripts/examples/python"
 error_summary = true
+# default_return = false sets the default return type of functions to 'Any'.
+# It makes mypy less noisy on untyped code and makes it more usable for now:
+default_return = false
 strict_equality = true
 show_error_codes = true
 show_error_context = true
@@ -117,7 +147,16 @@ disallow_any_explicit = false
 disallow_any_generics = true
 disallow_any_unimported = true
 disallow_subclassing_any = true
-disable_error_code = ["import-untyped"] # XenAPI is not typed yet
+disable_error_code = [
+    "explicit-override",
+    "misc",
+    "no-any-decorated",
+    "no-any-expr",
+    "no-untyped-call",
+    "no-untyped-def",
+    "no-untyped-usage",
+    "import-untyped",  # XenAPI is not typed yet
+]
 
 
 [[tool.mypy.overrides]]
@@ -156,8 +195,10 @@ disable = [
     "no-else-break",            # else clause following a break statement
     "protected-access",         # Best done during the code cleanup phase
     "super-with-arguments",     # Consider using Python 3 style super(no args) calls
+    "too-few-public-methods",   # Some classes only overload private methods, which is fine
     "too-many-branches",        # Existing code breaches this, not part of porting
     "too-many-arguments",       # Likewise, not part of porting
+    "too-many-lines",           # Likewise, not part of porting
     "too-many-locals",          # Likewise, not part of porting
     "too-many-statements",      # Likewise, not part of porting
     "unnecessary-pass",         # Cosmetic, best done during the code cleanup phase
@@ -167,64 +208,69 @@ disable = [
 
 # -----------------------------------------------------------------------------
 # Pyright is the static analysis behind the VSCode Python extension / Pylance
-# https://microsoft.github.io/pyright/#/configuration?id=main-configuration-options
+# https://microsoft.github.io/pyright/#/configuration
 # -----------------------------------------------------------------------------
 [tool.pyright]
-# Specifies the paths of directories or files that should be included in the
-# analysis. If no paths are specified, all files in the workspace are included:
-include = ["python3", "ocaml/xcp-rrdd"]
-
-# Conditionalize the stub files for type definitions based on the platform:
-pythonPlatform = "Linux"
-
-# typeCheckingMode: "off", "basic", "standard" or "strict"
-typeCheckingMode = "standard"
-
-# Specifies the version of Python that will be used to execute the source code.
-# Generate errors if the source code makes use of language features that are
-# not supported in that version. It will also tailor its use of type stub files,
-# which conditionalizes type definitions based on the version. If no version is
-# specified, pyright will use the version of the current python interpreter,
-# if one is present:
-pythonVersion = "3.6"
-
-# Paths of directories or files that should use "strict" analysis if they are
-# included. This is the same as manually adding a "# pyright: strict" comment.
-# In strict mode, most type-checking rules are enabled, and the type-checker
-# will be more aggressive in inferring types. If no paths are specified, strict
-# mode is not enabled:
-strict = ["python3/tests/test_observer.py"]
-
-#
-# Paths to exclude from analysis. If a file is excluded, it will not be
-# analyzed.
-#
-# FIXME: Some of these may have type errors, so they should be inspected and fixed:
-#
-exclude = [
+# include: directories to include in checking
+# strict: paths for which strict checking works
+# typeCheckingMode: set the standard type checking mode
+include = ["python3", "ocaml/xcp-rrdd"]
+strict = ["python3/tests/observer"]
+stubPath = "python3/stubs"
+pythonPlatform = "Linux"
+typeCheckingMode = "standard"
+reportMissingImports = false
+reportMissingModuleSource = false
+pythonVersion = "3.6"
+exclude = [
     "ocaml/xcp-rrdd/scripts/rrdd/rrdd.py",
     "ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py",
     "python3/packages/observer.py",
-    "python3/tests/pytype_reporter.py",
+    "python3/examples/XenAPI/XenAPI.py",
+    "python3/examples/XenAPIPlugin.py",
+
 ]
 
 # -----------------------------------------------------------------------------
 # Pytest is the test framework, for discovering and running tests, fixtures etc
-# https://pytest.readthedocs.io/en/latest/customize.html
+# https://pytest.readthedocs.io/en/latest/customize.html, https://docs.pytest.org
 # -----------------------------------------------------------------------------
-
 [tool.pytest.ini_options]
-addopts = "-ra"    # Show the output of all tests, including those that passed
-log_cli = true     # Capture log messages and show them in the output as well
+# -----------------------------------------------------------------------------
+# addopts: Options to add to all pytest calls:
+#   -v    show what happens
+#   -ra   show short summary after running tests
+#
+# addopts are added to all pytest calls. We don't add options that would force
+# testing specific paths. To be flexible, we use testpaths instead (see below).
+# -----------------------------------------------------------------------------
+addopts = "-v -ra"
+
+# -----------------------------------------------------------------------------
+# Other pytest config options:
+# log_cli: show logger messages
+# log_cli_level: log level to show
+# python_files: pattern for test files
+# python_functions: pattern for test functions
+# testpaths: directories to search for tests (by default, used for CI)
+#   For development, developers can test only specific files:
+#   Example: pytest python3/tests/test_perfmon.py
+# minversion: this config requires pytest>=7 to configure pythonpath
+# pythonpath: path to stub files and typing stubs for tests
+# xfail_strict: require removing the pytest.xfail marker when a test is fixed
+# required_plugins: require that these plugins are installed before testing
+# -----------------------------------------------------------------------------
+testpaths = ["python3", "ocaml/xcp-rrdd", "ocaml/xenopsd"]
+required_plugins = ["pytest-mock"]
 log_cli_level = "INFO"
+log_cli = true
+minversion = "7.0"
+pythonpath = "python3/stubs"  # Allows importing the XenAPI module
 python_files = ["test_*.py", "it_*.py"]
 python_functions = ["test_", "it_", "when_"]
-pythonpath = "scripts/examples/python"  # Allows to import the XenAPI module
-required_plugins = ["pytest-mock"]
-testpaths = ["python3", "scripts", "ocaml/xcp-rrdd"]
 xfail_strict = true  # is used to fail tests that are marked as xfail but pass (for TDD)
@@ -236,16 +282,26 @@ discard_messages_matching = [
     "No attribute 'group' on None",
     "No Node.TEXT_NODE in module xml.dom.minidom, referenced from 'xml.dom.expatbuilder'"
 ]
-expected_to_fail = []
+expected_to_fail = [
+]
 
+# -----------------------------------------------------------------------------
+# pytype: Google's static type analyzer - https://google.github.io/pytype/
+# 
----------------------------------------------------------------------------- [tool.pytype] inputs = [ "python3/", "ocaml/xcp-rrdd", + "ocaml/xenopsd", + "ocaml/xapi-storage/python", + "ocaml/xapi-storage-script", + "ocaml/vhd-tool", ] disable = [ + # Reduce noise from python scripts(import yum, xenfsimage, xcp, urlgrabber) + "import-error", ] platform = "linux" # Allow pytype to find the XenAPI module, the rrdd module and python3 modules: -pythonpath = "python3:scripts/examples/python:ocaml/xcp-rrdd/scripts/rrdd" +pythonpath = "python3/examples:." diff --git a/python3/Makefile b/python3/Makefile index 424dce190ff..fed125c01bb 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -1,13 +1,64 @@ include ../config.mk -SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") +# To replace strings in *.service like: @OPTDIR@ -> ${OPTDIR} +IPROG=../scripts/install.sh 755 +IDATA=../scripts/install.sh 644 -IDATA=install -m 644 +SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") DNF_PLUGIN_DIR=dnf-plugins install: - mkdir -p $(DESTDIR)$(SITE3_DIR) - mkdir -p $(DESTDIR)$(SITE3_DIR)/$(DNF_PLUGIN_DIR) + # Create destination directories using install -m 755 -d: + install -m 755 -d $(DESTDIR)$(OPTDIR)/bin + install -m 755 -d $(DESTDIR)$(SITE3_DIR) + install -m 755 -d $(DESTDIR)$(LIBEXECDIR) + install -m 755 -d $(DESTDIR)$(PLUGINDIR) + install -m 755 -d $(DESTDIR)/etc/sysconfig + install -m 755 -d $(DESTDIR)/usr/lib/systemd/system + install -m 755 -d $(DESTDIR)$(EXTENSIONDIR) + install -m 755 -d $(DESTDIR)$(SITE3_DIR)/$(DNF_PLUGIN_DIR) + + $(IDATA) packages/inventory.py $(DESTDIR)$(SITE3_DIR)/ $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ $(IDATA) dnf_plugins/accesstoken.py $(DESTDIR)$(SITE3_DIR)/$(DNF_PLUGIN_DIR)/ $(IDATA) dnf_plugins/ptoken.py $(DESTDIR)$(SITE3_DIR)/$(DNF_PLUGIN_DIR)/ + + $(IPROG) libexec/host-display $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/link-vms-by-sr.py $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/usb_reset.py $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/print-custom-templates $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/mail-alarm $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) + + $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/xe-reset-networking $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/static-vdis $(DESTDIR)$(OPTDIR)/bin + $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) + $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) + $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) + $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/echo.py $(DESTDIR)$(PLUGINDIR)/echo + $(IPROG) plugins/openvswitch-config-update $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) + + $(IPROG) perfmon/perfmon $(DESTDIR)$(PLUGINDIR) + $(IDATA) perfmon/perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service + $(IPROG) perfmon/sysconfig-perfmon $(DESTDIR)/etc/sysconfig/perfmon + +# example/python + $(IDATA) examples/XenAPIPlugin.py 
$(DESTDIR)$(SITE3_DIR)/
+	$(IDATA) examples/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/
+
+
+# poweron
+	$(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py
+	$(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan
+	$(IPROG) poweron/power-on.py $(DESTDIR)$(PLUGINDIR)/power-on-host
+	$(IPROG) poweron/IPMI.py $(DESTDIR)$(PLUGINDIR)/IPMI.py
diff --git a/python3/__init__.py b/python3/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/scripts/hfx_filename b/python3/bin/hfx_filename
similarity index 76%
rename from scripts/hfx_filename
rename to python3/bin/hfx_filename
index cea0f808200..616e5921abb 100755
--- a/scripts/hfx_filename
+++ b/python3/bin/hfx_filename
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Copyright (c) 2015 Citrix, Inc.
 #
@@ -14,8 +14,14 @@
 # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-from __future__ import print_function
-import sys, os, socket, urllib2, urlparse, XenAPI, traceback, xmlrpclib
+# pylint: disable=redefined-outer-name
+# pyright: reportFunctionMemberAccess=false
+# pyright: reportOptionalMemberAccess=false, reportAttributeAccessIssue=false
+
+import sys
+import socket
+
+import XenAPI
 
 db_url = "/remote_db_access"
 
@@ -28,18 +34,20 @@ def rpc(session_id, request):
     headers = [
         "POST %s?session_id=%s HTTP/1.0" % (db_url, session_id),
         "Connection:close",
-        "content-length:%d" % (len(request)),
+        "content-length:%d" % (len(request.encode('utf-8'))),
         ""
     ]
-    #print "Sending HTTP request:"
     for h in headers:
-        s.send("%s\r\n" % h)
-        #print "%s\r\n" % h,
-    s.send(request)
+        s.send((h + "\r\n").encode('utf-8'))
+    s.send(request.encode('utf-8'))
+
+    result = ""
+    while True:
+        chunk = s.recv(1024)
+        if not chunk:
+            break
+        result += chunk.decode('utf-8')
 
-    result = s.recv(1024)
-    #print "Received HTTP response:"
-    #print result
     if "200 OK" not in result:
         print("Expected an HTTP 200, got %s" % result, file=sys.stderr)
         return
@@ -55,13 +63,15 @@ def rpc(session_id, request):
     s.close()
 
 def parse_string(txt):
+    if not txt:
+        raise Exception("Unable to parse string response: None")
     prefix = "<value><array><data><value>success</value><value>"
    if not txt.startswith(prefix):
-        raise "Unable to parse string response"
+        raise Exception("Unable to parse string response: Wrong prefix")
    txt = txt[len(prefix):]
    suffix = "</value></data></array></value>"
    if not txt.endswith(suffix):
-        raise "Unable to parse string response"
+        raise Exception("Unable to parse string response: Wrong suffix")
    txt = txt[:len(txt)-len(suffix)]
    return txt
 
@@ -76,9 +86,8 @@ def read_field(session_id, table, fld, rf):
     return response
 
 if __name__ == "__main__":
-    import XenAPI
     xapi = XenAPI.xapi_local()
-    xapi.xenapi.login_with_password('root', '')
+    xapi.xenapi.login_with_password("root", "", "1.0", "hfx_filename")
     session_id = xapi._session
     try:
         rf = db_get_by_uuid(session_id, "pool_patch", sys.argv[1])
diff --git a/python3/bin/perfmon b/python3/bin/perfmon
new file mode 100644
index 00000000000..58be93284d7
--- /dev/null
+++ b/python3/bin/perfmon
@@ -0,0 +1,1615 @@
+#!/usr/bin/env python3
+#
+# perfmon - a daemon for monitoring performance of the host on which it is run
+# and of all the local VMs, and for generating events based on configurable
+# triggers
+#
+# Notes:
+# ======
+# The XAPI instance running on localhost monitors a number of variables
+# for each VM running locally (i.e. not on other pool members) and
+# for the host itself. Each variable is stored in 16 RRDs (Round Robin Databases).
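+# (16 = 4 consolidation functions x 4 sample granularities, per the table below)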
+#
+# Consolidation    Number of samples in RRD
+# function         5s/sample   1m/sample   1hr/sample   1day/sample
+# AVERAGE          120 (10m)   120 (2h)    ?            ?
+# MIN              120 (10m)   120 (2h)    ?            ?
+# MAX              120 (10m)   120 (2h)    ?            ?
+# LAST             120 (10m)   120 (2h)    ?            ?
+#
+# The "Consolidation function" tells how that RRD is built up from the
+# one with the next highest sample rate. E.g. In the 1m/sample "AVERAGE" RRD
+# each sample is the average of 12 from the 5s/sample "AVERAGE" RRD, whereas
+# in the 1m/sample "MIN" RRD each sample is the minimum of 12 from the
+# 5s/sample "AVERAGE" RRD.
+#
+# When XAPI is queried over http it selects the column (e.g. "1hr/sample")
+# based on the "start" CGI param. It will return the highest level of granularity
+# available for the period requested.
+#
+# The "cf" CGI param specifies the row. If it is not set, all rows are returned.
+
+# pylint: disable=too-many-lines, missing-class-docstring
+# pytype: disable=attribute-error
+
+import subprocess
+import gc
+import getopt
+import os
+import random
+import re
+import signal
+import socket
+import sys
+import syslog
+import time
+import traceback
+import urllib.request
+
+# used to parse rrd_updates because this may be large and sax is more efficient
+from xml import sax
+
+# used to parse other-config:perfmon. Efficiency is less important than reliability here
+from xml.dom import minidom  # pytype: disable=pyi-error
+from xml.parsers.expat import ExpatError
+
+import XenAPI
+
+
+def print_debug(string):  # pragma: no cover
+    if debug:
+        print("DEBUG:", string, file=sys.stderr)
+        syslog.syslog(syslog.LOG_USER | syslog.LOG_INFO, "PERFMON(DEBUG): %s" % string)
+
+
+def log_err(string):
+    print(string, file=sys.stderr)
+    syslog.syslog(syslog.LOG_USER | syslog.LOG_ERR, "PERFMON: %s" % string)
+    pass
+
+
+def log_info(string):  # pragma: no cover
+    print(string, file=sys.stderr)
+    syslog.syslog(syslog.LOG_INFO | syslog.LOG_INFO, "PERFMON: %s" % string)
+    pass
+
+
+def debug_mem():  # pragma: no cover
+    objCount = {}
+    gc.collect()
+    objList = gc.get_objects()
+    for obj in objList:
+        if getattr(obj, "__class__", None):
+            name = obj.__class__.__name__
+        else:
+            name = type(obj)
+        if name in objCount:
+            objCount[name] += 1
+        else:
+            objCount[name] = 1
+
+    output = []
+    for name, cnt in objCount.items():
+        output.append("%s :%s" % (name, cnt))
+    log_info("\n".join(output))
+
+
+class PerfMonException(Exception):
+    pass
+
+
+class XmlConfigException(PerfMonException):
+    pass
+
+
+class UsageException(Exception):
+    pass
+
+
+class IncorrectInputException(Exception):
+    pass
+
+
+# Start a session with the master of a pool.
+# Note: when calling http://localhost/rrd_update we must pass the session
+# ID as a param. The host then uses this to verify our validity with
+# the master before responding.
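+# (RRDUpdates.refresh() below is what appends this session_id to the CGI params.)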
+# If the verification fails we should get a 401 response
+class XapiSession(XenAPI.Session):  # pragma: no cover
+    """Object that represents a XenAPI session with the pool master
+    One of these is needed to refresh a VMMonitor or HOSTMonitor config, or
+    to refresh an RRDUpdates object
+    """
+
+    def __init__(self):
+        XenAPI.Session.__init__(
+            self, "http://_var_xapi_xapi", transport=XenAPI.UDSTransport()
+        )
+        self.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-perfmon")
+
+    def __del__(self):
+        self.xenapi.session.logout()
+
+    def id(self):
+        return self._session
+
+
+class ObjectReport:
+    def __init__(self, objtype, uuid):
+        self.objtype = (
+            objtype  # a string like "vm", or "host" taken from an <entry> tag
+        )
+        self.uuid = uuid  # the object's uuid
+        self.vars = {}  # maps rrd variable name to array of floats
+
+    def get_uuid(self):
+        return self.uuid
+
+    def get_var_names(self):
+        return list(self.vars.keys())
+
+    def get_value(self, var_name, row):
+        try:
+            return (self.vars[var_name])[row]
+        except Exception:
+            return 0.0
+
+    def insert_value(self, var_name, index, value):
+        if var_name not in self.vars:
+            self.vars[var_name] = []
+        self.vars[var_name].insert(index, value)
+
+
+# pylint: disable=too-few-public-methods
+class RRDReport:
+    "This is just a data structure that is passed to, and completed by, RRDContentHandler"
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.columns = 0       # num xapi vars in xml
+        self.rows = 0          # num samples in xml
+        self.start_time = 0    # timestamp of 1st sample in xml
+        self.end_time = 0      # timestamp of last sample in xml
+        self.step_time = 0     # seconds between each pair of samples
+        self.obj_reports = {}  # maps uuids to ObjectReports, built from xml
+
+
+class RRDColumn:
+    "class used internally by RRDContentHandler"
+
+    def __init__(self, paramname, obj_report):
+        self.paramname = paramname
+        self.obj_report = obj_report
+
+
+# pylint: disable=too-many-instance-attributes
+class RRDContentHandler(sax.ContentHandler):
+    """Handles data in this format:
+    <xport>
+      <meta>
+        <start>INTEGER</start>
+        <step>INTEGER</step>
+        <end>INTEGER</end>
+        <rows>INTEGER</rows>
+        <columns>INTEGER</columns>
+        <legend>
+          <entry>IGNOREME:(host|vm):UUID:PARAMNAME</entry>
+          ... another COLUMNS-1 entries ...
+        </legend>
+      </meta>
+      <data>
+        <row>
+          <t>INTEGER(END_TIME)</t>
+          <v>FLOAT</v>
+          ... another COLUMNS-1 values ...
+        </row>
+        ... another ROWS-2 rows ...
+        <row>
+          <t>INTEGER(START_TIME)</t>
+          <v>FLOAT</v>
+          ... another COLUMNS-1 values ...
+        </row>
+      </data>
+    </xport>
+    """
+
+    def __init__(self, report):
+        '''
+        report is saved and later updated by this object.
+        report should contain defaults already
+        '''
+        super().__init__()
+        self.report = report
+        self.in_start_tag = False
+        self.in_step_tag = False
+        self.in_end_tag = False
+        self.in_rows_tag = False
+        self.in_columns_tag = False
+        self.in_entry_tag = False
+        self.in_row_tag = False
+        self.column_details = []
+        self.row = 0
+        self.raw_text = ""
+        self.col = 0
+        self.in_t_tag = False
+        self.in_v_tag = False
+
+    def startElement(self, name, attrs):
+        self.raw_text = ""
+        if name == "start":
+            self.in_start_tag = True
+        elif name == "step":
+            self.in_step_tag = True
+        elif name == "end":
+            self.in_end_tag = True
+        elif name == "rows":
+            self.in_rows_tag = True
+        elif name == "columns":
+            self.in_columns_tag = True
+        elif name == "entry":
+            self.in_entry_tag = True
+        elif name == "row":
+            self.in_row_tag = True
+            self.col = 0
+
+        if self.in_row_tag:
+            if name == "t":
+                self.in_t_tag = True
+            elif name == "v":
+                self.in_v_tag = True
+
+    def characters(self, content):
+        conditions = [
+            self.in_start_tag,
+            self.in_step_tag,
+            self.in_end_tag,
+            self.in_rows_tag,
+            self.in_columns_tag,
+            self.in_entry_tag,
+            self.in_t_tag,
+            self.in_v_tag
+            # self.in_row_tag
+            # ignore text under row tag, <row>s are just for holding <t> and <v> nodes
+        ]
+        if any(conditions):
+            self.raw_text += content
+
+    def endElement(self, name):
+        if name == "start":
+            # This is overwritten later if there are any rows
+            self.report.start_time = int(self.raw_text)
+            self.in_start_tag = False
+        elif name == "step":
+            self.report.step_time = int(self.raw_text)
+            self.in_step_tag = False
+        elif name == "end":
+            # This is overwritten later if there are any rows
+            self.report.end_time = int(self.raw_text)
+            self.in_end_tag = False
+        elif name == "rows":
+            self.report.rows = int(self.raw_text)
+            self.in_rows_tag = False
+        elif name == "columns":
+            self.report.columns = int(self.raw_text)
+            self.in_columns_tag = False
+        elif name == "entry":
+            (_, objtype, uuid, paramname) = self.raw_text.split(":")
+            # lookup the obj_report corresponding to this uuid, or create if it does not exist
+            if uuid not in self.report.obj_reports:
+                self.report.obj_reports[uuid] = ObjectReport(objtype, uuid)
+            obj_report = self.report.obj_reports[uuid]
+
+            # save the details of this column
+            self.column_details.append(RRDColumn(paramname, obj_report))
+            self.in_entry_tag = False
+        elif name == "row":
+            self.in_row_tag = False
+            self.row += 1
+        elif name == "t":
+            # Extract start and end time from row data
+            # as it's more reliable than the values in the meta data
+            t = int(self.raw_text)
+            # Last row corresponds to start time
+            self.report.start_time = t
+            if self.row == 0:
+                # First row corresponds to end time
+                self.report.end_time = t
+
+            self.in_t_tag = False
+
+        elif name == "v":
+            v = float(self.raw_text)
+
+            # Find object report and paramname for this col
+            col_details = self.column_details[self.col]
+            obj_report = col_details.obj_report
+            paramname = col_details.paramname
+
+            # Update object_report
+            obj_report.insert_value(
+                paramname, index=0, value=v
+            )  # use index=0 as this is the earliest sample so far
+
+            # Update position in row
+            self.col += 1
+
+            self.in_v_tag = False
+
+
+# An object of this class should persist for the lifetime of the program
+class RRDUpdates:
+    """Object used to get and parse the output of http://localhost/rrd_updates?..."""
+
+    def __init__(self):
+        # params are what get passed to the CGI executable in the URL
+        self.params = {}
+        self.params["start"] = int(time.time()) - interval  # interval seconds ago
+        self.params["host"] = "true"  # include data for host (as well as for VMs)
+        self.params["sr_uuid"] = "all"  # include data for all SRs attached to this host
+        self.params["cf"] = (
+            "AVERAGE"  # consolidation function, each sample averages 12 from the 5 second RRD
+        )
+        self.params["interval"] = str(rrd_step)  # distinct from the perfmon interval
+        self.report = RRDReport()  # data structure updated by RRDContentHandler
+
+    def __repr__(self):
+        return "<RRDUpdates: params=%s>" % str(self.params)
+
+    def refresh(self, session, override_params=None):
+        "reread the rrd_updates over CGI and parse"
+        params = {}
+        if override_params is not None:
+            params = override_params
+        params["session_id"] = session.id()
+        params.update(self.params)
+        paramstr = "&".join(["%s=%s" % (k, params[k]) for k in params])
+        print_debug("Calling http://localhost/rrd_updates?%s" % paramstr)
+
+        url = "http://localhost/rrd_updates?%s" % paramstr
+        with urllib.request.urlopen(url) as sock:
+            xmlsource = sock.read().decode("utf-8")
+
+        # Use sax rather than minidom and save vast amounts of time and memory.
+        self.report.reset()
+        sax.parseString(xmlsource, RRDContentHandler(self.report))
+
+        # Update the time used on the next run
+        self.params["start"] = (
+            self.report.end_time + 1
+        )  # avoid retrieving same data twice
+
+        print_debug(
+            "Refreshed rrd_updates, start = %d, end = %d, rows = %d"
+            % (self.report.start_time, self.report.end_time, self.report.rows)
+        )
+
+    def get_num_rows(self):
+        "Return the number of samples of each parameter"
+        return self.report.rows
+
+    def get_obj_report_by_uuid(self, uuid):
+        "Return an ObjectReport for the object with this uuid"
+        try:
+            return self.report.obj_reports[uuid]
+        except Exception:
+            return None
+
+    def get_uuid_list_by_objtype(self, objtype):
+        '''
+        Return a list of uuids corresponding to the objects
+        of this type for which we have ObjectReports
+        '''
+        return [
+            objrep.uuid
+            for objrep in self.report.obj_reports.values()
+            if objrep.objtype == objtype
+        ]
+
+
+# Consolidation functions:
+supported_consolidation_functions = [
+    "sum",
+    "average",
+    "max",
+    "get_percent_fs_usage",
+    "get_percent_log_fs_usage",
+    "get_percent_mem_usage",
+    "get_percent_sr_usage",
+]
+
+
+def average(mylist):
+    if not mylist:
+        log_err("Error in average, no input data, return 0.0 instead")
+        return 0.0
+    return sum(mylist) / float(len(mylist))
+
+
+def get_percent_log_fs_usage(_):
+    '''
+    Get the percent usage of the host filesystem for logs partition.
+    Input list is ignored and should be empty
+    '''
+    fs_output = subprocess.getoutput("df /etc/passwd")
+    log_fs_output = subprocess.getoutput("df /var/log")
+    fs_output = " ".join(fs_output.splitlines()[1:])
+    log_fs_output = " ".join(log_fs_output.splitlines()[1:])
+    # Get the percent usage only when there is a separate logs partition
+    if fs_output.split()[0] != log_fs_output.split()[0]:
+        percentage = log_fs_output.split()[4]
+        # remove % character and convert to float
+        return float(percentage[0:-1]) / 100.0
+    else:
+        return float("NaN")
+
+
+def get_percent_fs_usage(_):
+    '''
+    Get the percent usage of the host filesystem.
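+    (Returns a fraction, e.g. 0.42 when df reports 42% usage.)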
+ Input list is ignored and should be empty + ''' + # this file is on the filesystem of interest in both OEM and Retail + output = subprocess.getoutput("df /etc/passwd") + output = " ".join( + output.splitlines()[1:] + ) # remove header line and rewrap on single line + percentage = output.split()[4] + # remove % character and convert to float + return float(percentage[0:-1]) / 100.0 + + +def get_percent_mem_usage(_): + ''' + Get the percent usage of Dom0 memory/swap. + Input list is ignored and should be empty + ''' + try: + with open("/proc/meminfo", "r", encoding="utf-8") as memfd: + memlist = memfd.readlines() + # memorylists is a list of lists, each list contains two parts: memtype and size + memorylists = [m.split(":", 1) for m in memlist] + memdict = { + # pytype complained that No attribute 'group' on None + # Let Exception catch the `not matched` issue and return 0.0 + k.strip(): float(re.search(r"\d+", v.strip()).group(0)) + for (k, v) in memorylists + } + # We consider the sum of res memory and swap in use as the hard demand + # of mem usage, it is bad if this number is beyond the physical mem, as + # in such case swapping is obligatory rather than voluntary, hence + # degrading the performance. We define the percentage metrics as + # (res_mem + swap_in_use) / phy_mem, which could potentially go beyond + # 100% (but is considered bad when it does) + mem_in_use = ( + memdict["MemTotal"] + - memdict["MemFree"] + - memdict["Buffers"] + - memdict["Cached"] + ) + swap_in_use = memdict["SwapTotal"] - memdict["SwapFree"] + return float(mem_in_use + swap_in_use) / memdict["MemTotal"] + except Exception as e: + log_err("Error %s in get_percent_mem_usage, return 0.0 instead" % e) + return 0.0 + + +def get_percent_sr_usage(mylist): + """ + Get the percent usage of the SR. + Input list should be exactly two items: [physical_utilisation, size] + """ + try: + if len(mylist) != 2: + raise IncorrectInputException( + "Incorrect number of values to consolidate: %d (exactly 2 values)" + % len(mylist) + ) + physical_utilisation, size = mylist[0:2] + return float(physical_utilisation) / size + except Exception as e: + log_err("Error %s in get_percent_sr_usage, return 0.0 instead" % e) + return 0.0 + + +# pylint: disable=too-few-public-methods +class VariableConfig: + """Object storing the configuration of a Variable + + Initialisation parameters: + xmldoc = dom object representing the nodes in the ObjectMonitor config strings. 
+    See VMMonitor.__doc__ and HOSTMonitor.__doc__
+    alarm_create_callback =
+        callback called by Variable.update() to create and send an alarm
+    get_default_variable_config =
+        a function that VariableConfig.__init__() uses to look up default tag values
+        by variable name
+    """
+
+    def __init__(self, xmldoc, alarm_create_callback, get_default_variable_config):
+        try:
+            name = xmldoc.getElementsByTagName("name")[0].getAttribute("value")
+        except IndexError as e:
+            raise XmlConfigException("variable missing 'name' tag") from e
+
+        def get_value(tag):
+            try:
+                return xmldoc.getElementsByTagName(tag)[0].getAttribute("value")
+            except Exception:
+                return get_default_variable_config(name, tag)
+
+        rrd_regex = get_value("rrd_regex")
+        consolidation_fn = get_value("consolidation_fn")
+        alarm_trigger_level = get_value("alarm_trigger_level")
+        alarm_trigger_period = get_value("alarm_trigger_period")
+        alarm_auto_inhibit_period = get_value("alarm_auto_inhibit_period")
+        alarm_trigger_sense = get_value("alarm_trigger_sense")
+        alarm_priority = get_value("alarm_priority")
+
+        # Save xmldoc: we need this when creating the body of the alarms
+        self.xmldoc = xmldoc
+
+        self.name = name
+        try:
+            self.rrd_regex = re.compile("^%s$" % rrd_regex)
+        except Exception as e:
+            raise XmlConfigException(
+                "variable %s: regex %s does not compile" % (name, rrd_regex)
+            ) from e
+
+        if consolidation_fn not in supported_consolidation_functions:
+            raise XmlConfigException(
+                "variable %s: consolidation function %s not supported"
+                % (name, consolidation_fn)
+            )
+        # It's fine to use eval here
+        # pylint: disable=eval-used
+        self.consolidation_fn = eval(consolidation_fn)
+
+        try:
+            self.alarm_trigger_period = int(alarm_trigger_period)
+        except Exception as e:
+            raise XmlConfigException(
+                "variable %s: alarm_trigger_period %s not an int"
+                % (name, alarm_trigger_period)
+            ) from e
+
+        try:
+            self.alarm_auto_inhibit_period = int(alarm_auto_inhibit_period)
+        except Exception as e:
+            raise XmlConfigException(
+                "variable %s: alarm_auto_inhibit_period %s not an int"
+                % (name, alarm_auto_inhibit_period)
+            ) from e
+        try:
+            trigger_level = float(alarm_trigger_level)
+        except Exception as e:
+            raise XmlConfigException(
+                "variable %s: alarm_trigger_level %s not a float"
+                % (name, alarm_trigger_level)
+            ) from e
+
+        self.alarm_priority = alarm_priority
+
+        if alarm_trigger_sense == "high":
+            self.test_level = lambda: (self.value > trigger_level)
+        else:
+            self.test_level = lambda: (self.value < trigger_level)
+        self.alarm_create_callback = alarm_create_callback
+
+
+def variable_configs_differ(vc1, vc2):
+    "Say whether the configuration of one variable differs from that of another"
+    return vc1.xmldoc.toxml() != vc2.xmldoc.toxml()
+
+
+class VariableState:
+    """Object storing the state of a Variable"""
+
+    def __init__(self):
+        self.value = None
+        # Attributes `alarm_auto_inhibit_period` and `alarm_trigger_period` are defined
+        # in VariableConfig, and class Variable multiply inherits from
+        # VariableConfig and VariableState
+        self.timeof_last_alarm = time.time() - self.alarm_auto_inhibit_period
+        self.trigger_down_counter = self.alarm_trigger_period
+
+
+class Variable(VariableConfig, VariableState):
+    """Variable() is used by ObjectMonitor to create one Variable object for each
+    variable specified in its config string
+    """
+
+    def __init__(self, *args):
+        VariableConfig.__init__(self, *args)
+        VariableState.__init__(self)
+        self.active = True
+        print_debug("Created Variable %s" % self.name)
+
+    def set_active(self, active):
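+        """Enable or disable this Variable; reactivating resets its alarm state"""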
print_debug( + "set_active on %s. (old, new) = (%s, %s)" % (self.name, self.active, active) + ) + if active == self.active: + return # nothing to do + self.active = active + if active: + VariableState.__init__(self) # reset when reactivating + + def __generate_alarm(self, session): + """Generate an alarm using callback provided by creator + + ... provided that one has not been generated in the last + self.alarm_auto_inhibit_period seconds + """ + t = time.time() + delta = t - self.timeof_last_alarm + print_debug( + "Time since last alarm for var %s is %d - %d = %d. Refractory period = %d." + % ( + self.name, + t, + self.timeof_last_alarm, + delta, + self.alarm_auto_inhibit_period, + ) + ) + if delta < self.alarm_auto_inhibit_period: + return # we are in the auto inhibit period - do nothing + self.timeof_last_alarm = t + message = "value: %f\nconfig:\n%s" % (self.value, self.xmldoc.toprettyxml()) + + self.alarm_create_callback(self, session, message) + + def update(self, value, session): + """Update the value of the variable using an RRDUpdates object + + Calls self.__generate_alarm() if level has been 'bad' for more than + self.alarm_trigger_period seconds + """ + self.value = value + print_debug("Variable %s set to %f" % (self.name, value)) + if self.test_level(): + # level is bad + self.trigger_down_counter -= rrd_step + if self.trigger_down_counter <= 0: + self.__generate_alarm(session) + # reset trigger counter + self.trigger_down_counter = self.alarm_trigger_period + else: + # level good - reset trigger counter + self.trigger_down_counter = self.alarm_trigger_period + + +class ObjectMonitor: + """Abstract class, used as base for VMMonitor and HOSTMonitor + + Public attributes are uuid, refresh_config() + Inherited classes must implement a public attribute process_rrd_updates() + """ + + def __init__(self, uuid): + self.uuid = uuid + self.xmlconfig = None + # "variables" is the public attribute of interest + self.variables = [] + self.refresh_config() + + def refresh_config(self): + if self.__update_xmlconfig(): + # config has changed - reparse it + try: + self.__parse_xmlconfig() + except XmlConfigException as e: + log_err( + "%s %s config error: %s" % (self.monitortype, self.uuid, str(e)) + ) + except ExpatError as e: + log_err( + "%s %s XML parse error: %s" % (self.monitortype, self.uuid, str(e)) + ) + return True + else: + return False # config unchanged + + def __update_xmlconfig(self): + if self.uuid not in all_xmlconfigs: + xmlconfig = None + else: + xmlconfig = all_xmlconfigs[self.uuid] + changed = False + if xmlconfig != self.xmlconfig: + self.xmlconfig = xmlconfig + changed = True + return changed + + def __parse_xmlconfig(self): + if not self.xmlconfig: + # Possible if this VM/host is not configured yet + self.variables = [] + return + xmldoc = minidom.parseString(self.xmlconfig) + variable_nodes = xmldoc.getElementsByTagName("variable") + variable_names = [] + + for vn in variable_nodes: + # create a variable using the config in vn + var = Variable(vn, self.alarm_create, self.get_default_variable_config) + + # Update list of variable names + if var.name not in variable_names: + variable_names.append(var.name) + + # build list of variables already present with same name + vars_with_same_name = [v for v in self.variables if v.name == var.name] + count = 0 + append_var = True + for v in vars_with_same_name: + # this list should be 0 or 1 long! 
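+                # (a longer list means duplicates were left behind; they are
+                #  logged and removed below)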
+                if count > 0:
+                    log_err(
+                        "programmer error: found duplicate variable %s (uuid %s)"
+                        % (var.name, self.uuid)
+                    )
+                    self.variables.remove(v)
+                    continue
+                count += 1
+
+                # only replace variable in self.variables if its config has changed.
+                # This way we don't reset its state
+                if variable_configs_differ(var, v):
+                    self.variables.remove(v)
+                else:
+                    append_var = False
+
+            if append_var:
+                print_debug(
+                    "Appending %s to list of variables for %s UUID=%s"
+                    % (var.name, self.monitortype, self.uuid)
+                )
+                self.variables.append(var)
+
+        # Now delete any old variables that do not appear in the new variable_nodes
+        variables_to_remove = [
+            v for v in self.variables if v.name not in variable_names
+        ]
+        for v in variables_to_remove:
+            print_debug(
+                "Deleting %s from list of variables for UUID=%s" % (v.name, self.uuid)
+            )
+            self.variables.remove(v)
+
+    def get_active_variables(self):
+        return self.variables
+
+    def process_rrd_updates(self, rrd_updates, session):
+        print_debug(
+            "%sMonitor processing rrd_updates for %s" % (self.monitortype, self.uuid)
+        )
+        obj_report = rrd_updates.get_obj_report_by_uuid(self.uuid)
+        num_rows = rrd_updates.get_num_rows()
+        if not obj_report:
+            return
+        params_in_obj_report = obj_report.get_var_names()
+
+        for var in self.get_active_variables():
+            # find the subset of the params returned for this object
+            # that we need to consolidate into var
+            params_to_consolidate = list(
+                filter(var.rrd_regex.match, params_in_obj_report)
+            )
+            for row in range(num_rows):
+                # Get the values to consolidate
+                values_to_consolidate = [
+                    obj_report.get_value(param, row) for param in params_to_consolidate
+                ]
+                # Consolidate them
+                value = var.consolidation_fn(values_to_consolidate)
+                # Pass result on to the variable object
+                # This may result in an alarm being generated
+                var.update(value, session)
+
+    def alarm_create(self, var, session, message):
+        "Callback used by Variable var to actually send an alarm"
+        print_debug(
+            "Creating an alarm for %s %s, message: %s"
+            % (self.monitortype, self.uuid, message)
+        )
+        session.xenapi.message.create(
+            "ALARM", var.alarm_priority, self.monitortype, self.uuid, message
+        )
+
+
+class VMMonitor(ObjectMonitor):
+    """Object that maintains the state of one VM
+
+    Configured by writing an xml string into an other-config key, e.g.
+    xe vm-param-set uuid=$vmuuid other-config:perfmon=\
+    '<config><variable><name value="cpu_usage"/><alarm_trigger_level value="0.5"/>
+    </variable></config>'
+
+    Notes:
+    - Multiple <variable> nodes allowed
+    - full list of child nodes is
+      * name: what to call the variable (no default)
+      * alarm_priority: the priority of the messages generated (default '3')
+      * alarm_trigger_level: level of value that triggers an alarm (no default)
+      * alarm_trigger_sense:
+        'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high')
+      * alarm_trigger_period:
+        num seconds of 'bad' values before an alarm is sent (default '60')
+      * alarm_auto_inhibit_period:
+        num seconds this alarm disabled after an alarm is sent (default '3600')
+      * consolidation_fn:
+        how to combine variables from rrd_updates into one value
+        (default is 'average' for 'cpu_usage', 'get_percent_fs_usage' for 'fs_usage',
+        'get_percent_log_fs_usage' for 'log_fs_usage',
+        'get_percent_mem_usage' for 'mem_usage', & 'sum' for everything else)
+      * rrd_regex matches the names of variables
+        from (xe vm-data-sources-list uuid=$vmuuid) used to compute value
+        (only has defaults for "cpu_usage", "network_usage", and "disk_usage")
+    """
+
+    def __init__(self, *args):
+        self.monitortype = "VM"
+        ObjectMonitor.__init__(self, *args)
+        print_debug("Created VMMonitor with uuid %s" % self.uuid)
+
+    def get_default_variable_config(self, variable_name, config_tag):
+        "This allows the user to not specify a full set of tags for each variable in the xml config"
+        if config_tag == "consolidation_fn":
+            if variable_name == "cpu_usage":
+                return "average"
+            elif variable_name == "fs_usage":
+                return "get_percent_fs_usage"
+            elif variable_name == "log_fs_usage":
+                return "get_percent_log_fs_usage"
+            elif variable_name == "mem_usage":
+                return "get_percent_mem_usage"
+            else:
+                return "sum"
+        elif config_tag == "rrd_regex":
+            if variable_name == "cpu_usage":
+                return "cpu[0-9]+"
+            elif variable_name == "network_usage":
+                return "vif_[0-9]+_[rt]x"
+            elif variable_name == "disk_usage":
+                return "vbd_(xvd|hd)[a-z]+_(read|write)"
+            elif variable_name == "fs_usage":
+                return "_$_DUMMY__"  # match nothing
+            elif variable_name == "log_fs_usage":
+                return "_$_DUMMY__"  # match nothing
+            elif variable_name == "mem_usage":
+                return "_$_DUMMY__"  # match nothing
+            elif variable_name == "memory_internal_free":
+                return variable_name
+            else:
+                raise XmlConfigException(
+                    "variable %s: no default rrd_regex - please specify one"
+                    % variable_name
+                )
+        elif config_tag == "alarm_trigger_period":
+            return "60"  # 1 minute
+        elif config_tag == "alarm_auto_inhibit_period":
+            return "3600"  # 1 hour
+        elif config_tag == "alarm_trigger_level":
+            if variable_name == "fs_usage":
+                return "0.9"  # trigger when 90% full
+            elif variable_name == "log_fs_usage":
+                return "0.9"  # trigger when 90% full
+            elif variable_name == "mem_usage":
+                return "0.95"  # trigger when mem demanded is close to phy_mem
+            else:
+                raise XmlConfigException(
+                    "variable %s: no default alarm_trigger_level - please specify one"
+                    % variable_name
+                )
+        elif config_tag == "alarm_trigger_sense":
+            if variable_name == "memory_internal_free":
+                return "low"
+            else:
+                return "high"  # trigger if *above*
+        elif config_tag == "alarm_priority":
+            return "3"  # Service degradation level defined in PR-1455
+        else:
+            raise XmlConfigException(
+                "variable %s: no default available for tag %s"
+                % (variable_name, config_tag)
+            )
+
+
+class SRMonitor(ObjectMonitor):
+    """Object that maintains the state of one SR
+
+    Configured by writing an xml string into an other-config key, e.g.
+    xe sr-param-set uuid=$sruuid other-config:perfmon=\
+    '<config><variable><name value="physical_utilisation"/><alarm_trigger_level value="0.8"/>
+    </variable></config>'
+
+    Notes:
+    - Multiple <variable> nodes allowed
+    - full list of child nodes is
+      * name: what to call the variable (no default)
+      * alarm_priority: the priority of the messages generated (default '3')
+      * alarm_trigger_level: level of value that triggers an alarm (no default)
+      * alarm_trigger_sense:
+        'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high')
+      * alarm_trigger_period:
+        num seconds of 'bad' values before an alarm is sent (default '60')
+      * alarm_auto_inhibit_period:
+        num seconds this alarm disabled after an alarm is sent (default '3600')
+      * consolidation_fn:
+        how to combine variables from rrd_updates into one value
+        (default is 'get_percent_sr_usage' for 'physical_utilisation',
+        & 'sum' for everything else)
+      * rrd_regex matches the names of variables
+        from (xe sr-data-sources-list uuid=$sruuid) used to compute value
+        (has a default for "physical_utilisation")
+    """
+
+    def __init__(self, *args):
+        self.monitortype = "SR"
+        ObjectMonitor.__init__(self, *args)
+        print_debug("Created SRMonitor with uuid %s" % self.uuid)
+
+    def get_default_variable_config(self, variable_name, config_tag):
+        "This allows the user to not specify a full set of tags for each variable in the xml config"
+        if config_tag == "consolidation_fn":
+            if variable_name == "physical_utilisation":
+                return "get_percent_sr_usage"
+            else:
+                return "sum"
+        elif config_tag == "rrd_regex":
+            if variable_name == "physical_utilisation":
+                return "physical_utilisation|size"
+            elif variable_name == "sr_io_throughput_total_per_host":
+                # (these are to drive Host RRDs and so are handled by the HOSTMonitor)
+                return "_$_DUMMY__"
+            else:
+                raise XmlConfigException(
+                    "variable %s: no default rrd_regex - please specify one"
+                    % variable_name
+                )
+        elif config_tag == "alarm_trigger_period":
+            return "60"  # 1 minute
+        elif config_tag == "alarm_auto_inhibit_period":
+            return "3600"  # 1 hour
+        elif config_tag == "alarm_trigger_level":
+            if variable_name == "physical_utilisation":
+                return "0.8"  # trigger when 80% full
+            else:
+                raise XmlConfigException(
+                    "variable %s: no default alarm_trigger_level - please specify one"
+                    % variable_name
+                )
+        elif config_tag == "alarm_trigger_sense":
+            return "high"  # trigger if *above*
+        elif config_tag == "alarm_priority":
+            return "3"  # Service degradation level defined in PR-1455
+        else:
+            raise XmlConfigException(
+                "variable %s: no default available for tag %s"
+                % (variable_name, config_tag)
+            )
+
+
+class HOSTMonitor(ObjectMonitor):
+    """Object that maintains the state of one Host
+
+    Configured by writing an xml string into an other-config key, e.g.
+    xe host-param-set uuid=$hostuuid other-config:perfmon=\
+    '<config><variable><name value="cpu_usage"/><alarm_trigger_level value="0.5"/>
+    </variable></config>'
+
+    Notes:
+    - Multiple <variable> nodes allowed
+    - full list of child nodes is
+      * name: what to call the variable (no default)
+      * alarm_priority: the priority of the messages generated (default '3')
+      * alarm_trigger_level: level of value that triggers an alarm (no default)
+      * alarm_trigger_sense:
+        'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high')
+      * alarm_trigger_period:
+        num seconds of 'bad' values before an alarm is sent (default '60')
+      * alarm_auto_inhibit_period:
+        num seconds this alarm disabled after an alarm is sent (default '3600')
+      * consolidation_fn: how to combine variables from rrd_updates into one value
+        (default is 'average' for 'cpu_usage' & 'sum' for everything else)
+      * rrd_regex matches the names of variables
+        from (xe host-data-source-list uuid=$hostuuid) used to compute value
+        (only has defaults for "cpu_usage", "network_usage", "memory_free_kib"
+        and "sr_io_throughput_total_xxxxxxxx"
+        where that last one ends with the first eight characters of the SR uuid)
+
+    Also, as a special case for SR throughput, it is possible to configure a Host by
+    writing xml into the other-config key of an SR connected to it, e.g.
+    xe sr-param-set uuid=$sruuid other-config:perfmon=\
+    '<config><variable><name value="sr_io_throughput_total_per_host"/>
+    <alarm_trigger_level value="0.01"/>
+    </variable></config>'
+
+    This only works for that one specific variable-name,
+    and rrd_regex must not be specified.
+    Configuration done on the host directly
+    (variable-name sr_io_throughput_total_xxxxxxxx) takes priority.
+    """
+
+    def __init__(self, *args):
+        self.monitortype = "Host"
+        self.secondary_variables = set()
+        self.secondary_xmlconfigs = {}  # map of sr uuid to xml text
+        ObjectMonitor.__init__(self, *args)
+        print_debug("Created HOSTMonitor with uuid %s" % self.uuid)
+
+    def get_default_variable_config(self, variable_name, config_tag):
+        "This allows the user to not specify a full set of tags for each variable in the xml config"
+        if config_tag == "consolidation_fn":
+            if variable_name == "cpu_usage":
+                return "average"
+            else:
+                return "sum"
+        elif config_tag == "rrd_regex":
+            if variable_name == "cpu_usage":
+                return "cpu[0-9]+"
+            elif variable_name == "network_usage":
+                return "pif_eth[0-9]+_[rt]x"
+            elif variable_name == "memory_free_kib":
+                return variable_name
+            elif re.match("sr_io_throughput_total_[0-9a-f]{8}$", variable_name):
+                return variable_name[3:]
+            else:
+                raise XmlConfigException(
+                    "variable %s: no default rrd_regex - please specify one"
+                    % variable_name
+                )
+        elif config_tag == "alarm_trigger_period":
+            return "60"  # 1 minute
+        elif config_tag == "alarm_auto_inhibit_period":
+            return "3600"  # 1 hour
+        elif config_tag == "alarm_trigger_sense":
+            if variable_name == "memory_free_kib":
+                return "low"
+            else:
+                return "high"  # trigger if *above* level
+        elif config_tag == "alarm_priority":
+            return "3"  # Service degradation level defined in PR-1455
+        else:
+            raise XmlConfigException(
+                "variable %s: no default available for tag %s"
+                % (variable_name, config_tag)
+            )
+
+    def get_active_variables(self):
+        r = self.variables + [v for v in self.secondary_variables if v.active]
+        print_debug(
+            "Returning active variables: %d main, %d total"
+            % (len(self.variables), len(r))
+        )
+        return r
+
+    def refresh_config(self):
+        main_changed = ObjectMonitor.refresh_config(self)
+
+        # Now handle any extra config from SRs.
+        # This functionality makes this file inelegant but means that it is
+        # possible to set up an alarm on each host that uses an SR by setting
+        # appropriate configuration in the SR's other-config.
+        if self.uuid not in sruuids_by_hostuuid:
+            print_debug("%s not in sruuids_by_hostuuid" % self.uuid)
+            self.secondary_variables.clear()
+            self.secondary_xmlconfigs.clear()
+            return
+
+        secondary_changed = False
+        old_sruuids = set(self.secondary_xmlconfigs)  # create set of keys
+        current_sruuids = sruuids_by_hostuuid[self.uuid]  # a set already
+        if old_sruuids != current_sruuids:
+            print_debug("Changed set of perfmon sruuids for host %s" % self.uuid)
+            secondary_changed = True
+        else:
+            for sruuid in sruuids_by_hostuuid[self.uuid]:
+                sr_xmlconfig = all_xmlconfigs[sruuid]
+                # As an optimisation, if the xml is unchanged then do not re-parse.
+                # Otherwise we would create Variables which would
+                # turn out to be the same as existing ones so we would ignore them.
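+                # Note: a change to any one SR's xml causes all of this host's
+                # secondary configs to be re-parsed below.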
+ if ( + sruuid in self.secondary_xmlconfigs + and self.secondary_xmlconfigs[sruuid] == sr_xmlconfig + ): + print_debug("Unchanged sr_xmlconfig for sruuid %s" % sruuid) + else: + print_debug( + "Found new/different sr_xmlconfig for sruuid %s" % sruuid + ) + secondary_changed = True + break + + if secondary_changed: + try: + self.__parse_secondary_xmlconfigs() + except XmlConfigException as e: + log_err( + "%s %s secondary config error: %s" + % (self.monitortype, self.uuid, str(e)) + ) + except ExpatError as e: + log_err( + "%s %s secondary XML parse error: %s" + % (self.monitortype, self.uuid, str(e)) + ) + + if main_changed or secondary_changed: + # Calculate which secondary variables are active, + # i.e. not overridden by ones configured on the host rather than the SR. + main_names = {v.name for v in self.variables} + for v in self.secondary_variables: + v.set_active(v.name not in main_names) + + def __parse_secondary_xmlconfigs(self): + variable_names = ( + set() + ) # Names of the Variable objects we create based on the xml nodes we find + self.secondary_xmlconfigs.clear() + for sruuid in sruuids_by_hostuuid[self.uuid]: + print_debug("Looking for config on SR uuid %s" % sruuid) + sr_xmlconfig = all_xmlconfigs[sruuid] + self.secondary_xmlconfigs[sruuid] = sr_xmlconfig + xmldoc = minidom.parseString(sr_xmlconfig) + variable_nodes = xmldoc.getElementsByTagName("variable") + found = False + for vn in variable_nodes: + try: + name_element = vn.getElementsByTagName("name")[0] + name = name_element.getAttribute("value") + except IndexError: + log_err( + "variable missing 'name' tag in perfmon xml config of SR %s" + % sruuid + ) + continue # perhaps other nodes are valid + print_debug( + "Found variable with name %s on SR uuid %s" % (name, sruuid) + ) + if name != "sr_io_throughput_total_per_host": + continue # Do nothing unless the variable is meant for the host + if len(vn.getElementsByTagName("rrd_regex")) > 0: + log_err( + "Configuration error:" \ + "rrd_regex must not be specified in config on SR meant for each host" + ) + continue # perhaps another node is valid + if found: + log_err( + "Configuration error: duplicate variable %s on SR %s" + % (name, sruuid) + ) + # A host can only have one Variable from a given SR + # since we only accept one kind (one name). + break + found = True + name_override = "sr_io_throughput_total_%s" % sruuid[0:8] + name_element.setAttribute("value", name_override) + provenance_element = xmldoc.createElement("configured_on") + provenance_element.setAttribute("class", "SR") + provenance_element.setAttribute("uuid", sruuid) + vn.appendChild(provenance_element) + var = Variable(vn, self.alarm_create, self.get_default_variable_config) + variable_names.add(var.name) + append_var = True + vars_with_same_name = [ + v for v in self.secondary_variables if v.name == var.name + ] + for v in vars_with_same_name: + # this list should be 0 or 1 long! + # only replace variable in self.secondary_variables if its config has changed. 
+ # This way we don't reset its state + if variable_configs_differ(var, v): + print_debug( + "Removing existing secondary variable to replace with new: %s" + % v.name + ) + self.secondary_variables.remove(v) + else: + print_debug( + "Found existing secondary variable with same config: %s" + % v.name + ) + append_var = False + if append_var: + print_debug( + "Adding %s to set of secondary variables for host UUID=%s" + % (var.name, self.uuid) + ) + self.secondary_variables.add(var) + + # Now that we have read all the xml items, + # delete any old variables that do not appear in the new variable_nodes + print_debug( + "Going to delete any secondary_variables not in %s" % variable_names + ) + variables_to_remove = [ + v for v in self.secondary_variables if v.name not in variable_names + ] + for v in variables_to_remove: + print_debug( + "Deleting %s from set of secondary variables for UUID=%s" + % (v.name, self.uuid) + ) + self.secondary_variables.remove(v) + + +all_xmlconfigs = {} +sruuids_by_hostuuid = ( + {} +) # Maps host uuid to a set of the uuids of the host's SRs that have other-config:perfmon + + +def update_all_xmlconfigs(session): + """Update all_xmlconfigs, a global dictionary that maps any uuid + (SR, host or VM) to the xml config string in other-config:perfmon keys + and update sruuids_by_hostuuid which together with all_xmlconfigs allows + lookup of the other-config:perfmon xml of the SRs connected to a host""" + # `all_xmlconfigs` and `sruuids_by_hostuuid` are updated by clear() and update() + # pylint: disable=global-variable-not-assigned + global all_xmlconfigs + global sruuids_by_hostuuid + + all_host_recs = session.xenapi.host.get_all_records() + all_vm_recs = session.xenapi.VM.get_all_records() + all_sr_recs = session.xenapi.SR.get_all_records() + + # build dictionary mapping uuids to other_configs + all_otherconfigs = {} + + for recs in (all_host_recs, all_vm_recs, all_sr_recs): + all_otherconfigs.update( + [(recs[ref]["uuid"], recs[ref]["other_config"]) for ref in recs] + ) + + # rebuild dictionary mapping uuids to xmlconfigs + all_xmlconfigs.clear() + all_xmlconfigs.update( + [ + (uuid, other_config["perfmon"]) + for (uuid, other_config) in all_otherconfigs.items() + if "perfmon" in other_config + ] + ) + + # Rebuild another map + sruuids_by_hostuuid.clear() + for _, rec in all_sr_recs.items(): + if "perfmon" in rec["other_config"]: + sruuid = rec["uuid"] + # If we hadn't done SR.get_all_records we would now do SR.get_PBDs. 
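+            # An SR plugged into several hosts ends up in each of those hosts'
+            # sets, so each host gets its own secondary perfmon variables.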
+ host_refs = [session.xenapi.PBD.get_host(pbd) for pbd in rec["PBDs"]] + host_uuids = [all_host_recs[ref]["uuid"] for ref in host_refs] + for hu in host_uuids: + if hu in sruuids_by_hostuuid: + sruuids_by_hostuuid[hu].add(sruuid) + else: + sruuids_by_hostuuid[hu] = {sruuid} + + +# 5 minute default interval +interval = 300 +interval_percent_dither = 5 +rrd_step = 60 +debug = False + +# rate to call update_all_xmlconfigs() +config_update_period = 1800 + +# an af_unix socket name (the "\0" stops socket.bind() creating a fs node) +cmdsockname = "\0perfmon" +cmdmaxlen = 256 + +# pylint: disable=global-statement +def main(): # pragma: no cover + global interval + global interval_percent_dither + global rrd_step + global debug + global config_update_period + maxruns = None + try: + argv = sys.argv[1:] + opts, _ = getopt.getopt( + argv, + "i:n:ds:c:D:", + [ + "interval=", + "numloops=", + "debug", + "rrdstep=", + "config_update_period=", + "interval_percent_dither=", + ], + ) + except getopt.GetoptError as e: + raise UsageException from e + + for opt, arg in opts: + if opt in ("-i", "--interval"): + interval = int(arg) + elif opt in ("-n", "--numloops"): + maxruns = int(arg) + elif opt in ("-d", "--debug"): + debug = True + elif opt in ("-s", "--rrdstep"): + rrd_step = int(arg) + if rrd_step not in (5, 60): + raise UsageException + elif opt in ("-c", "--config_update_period"): + config_update_period = int(arg) + elif opt in ("-D", "--interval_percent_dither"): + interval_percent_dither = int(arg) + else: + raise UsageException + + # open the cmd socket (over which we listen for commands such as "refresh") + cmdsock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + cmdsock.bind(cmdsockname) + + # The dither on each loop (prevents stampede on master) + rand = random.Random().uniform + dither = (interval * interval_percent_dither) / 100.0 + + # Create a XAPI session on first run + restart_session = True + + # Create a client for getting the rrd_updates over HTTP + rrd_updates = RRDUpdates() + + # Work out when next to update all the xmlconfigs for all the + # hosts and all the VMs. This causes a lot of data to be retrieved + # from the master, so we only do it once every config_update_period + # and we cache the results + next_config_update = time.time() + + # monitors for vms running on this host. 
+    # This dictionary uses uuids to look up each monitor object
+    vm_mon_lookup = {}
+
+    # monitors for srs plugged on this host
+    # This dictionary uses uuids to look up each monitor object
+    sr_mon_lookup = {}
+
+    # The monitor for the host
+    host_mon = None
+
+    runs = 0
+    while True:
+        print_debug("Run: %d" % runs)
+
+        # Get new updates - and catch any http errors
+        try:
+            # if the session failed on the last run we need to restart it
+            if restart_session:
+                session = XapiSession()
+                restart_session = False
+
+            rrd_updates.refresh(session)
+
+            # Should we update all_xmlconfigs?
+            if time.time() >= next_config_update:
+                print_debug("Updating all_xmlconfigs")
+                # yes - update all the xml configs:
+                # this generates a few LARGE xapi messages from the master
+                update_all_xmlconfigs(session)
+
+                # Set the time to do this next
+                next_config_update = time.time() + config_update_period
+
+            # List of VMs present in rrd_updates
+            vm_uuid_list = rrd_updates.get_uuid_list_by_objtype("vm")
+
+            # Remove any monitors for VMs no longer listed in the rrd_updates page.
+            # Since we .pop() inside the loop, iterate over list(dict_var.keys()):
+            for uuid in list(vm_mon_lookup.keys()):
+                if uuid not in vm_uuid_list:
+                    vm_mon_lookup.pop(uuid)
+
+            # Create monitors for VMs that have just appeared in the rrd_updates page
+            for uuid in vm_uuid_list:
+                if uuid not in vm_mon_lookup:
+                    vm_mon_lookup[uuid] = VMMonitor(uuid)
+                else:
+                    # check if the config has changed, e.g. by XenCenter
+                    vm_mon_lookup[uuid].refresh_config()
+
+            # Remove the monitor for the host if it's no longer listed in the rrd_updates page
+            # Create a monitor for the host if it has just appeared in the rrd_updates page
+            try:
+                host_uuid = rrd_updates.get_uuid_list_by_objtype("host")[
+                    0
+                ]  # should only ever be one of these
+            except Exception:
+                # list may be empty!
+                host_uuid = None
+
+            if not host_uuid:
+                host_mon = None
+            elif not host_mon:
+                host_mon = HOSTMonitor(host_uuid)
+            elif host_mon.uuid != host_uuid:
+                raise PerfMonException(
+                    "host uuid in rrd_updates changed (old: %s, new %s)"
+                    % (host_mon.uuid, host_uuid)
+                )
+            else:
+                # check if the config has changed, e.g. by XenCenter
+                host_mon.refresh_config()
+
+            # List of SRs present in rrd_updates
+            sr_uuid_list = rrd_updates.get_uuid_list_by_objtype("sr")
+            print_debug("sr_uuid_list = %s" % sr_uuid_list)
+
+            # Remove monitors for SRs no longer listed in the rrd_updates page.
+            # Since we .pop() inside the loop, iterate over list(dict_var.keys()):
+            for uuid in list(sr_mon_lookup.keys()):
+                if uuid not in sr_uuid_list:
+                    sr_mon_lookup.pop(uuid)
+            # Create monitors for SRs that have just appeared in the rrd_updates page
+            for uuid in sr_uuid_list:
+                if uuid not in sr_mon_lookup:
+                    sr_mon_lookup[uuid] = SRMonitor(uuid)
+                else:
+                    sr_mon_lookup[uuid].refresh_config()
+
+            # Go through each vm_mon and update it using the rrd_updates;
+            # this may generate alarms
+            for vm_mon in vm_mon_lookup.values():
+                vm_mon.process_rrd_updates(rrd_updates, session)
+
+            # Ditto for the host_mon
+            if host_mon:
+                host_mon.process_rrd_updates(rrd_updates, session)
+
+            # And for the sr_mons
+            for sr_mon in sr_mon_lookup.values():
+                sr_mon.process_rrd_updates(rrd_updates, session)
+
+        except ConnectionRefusedError as e:
+            # "Connection refused[111]"
+            # this happens when we try to restart the session and *that* fails
+            time.sleep(2)
+            log_err(
+                "caught connection refused error: (%s) - restarting XAPI session"
+                % str(e)
+            )
+            restart_session = True
+        except urllib.error.HTTPError as e:
+            if e.code in (401, 500):
+                # Error getting rrd_updates: 401=Unauthorised, 500=Internal
+                # start a new session
+                log_err("caught http.error: (%s) - restarting XAPI session" % str(e))
+                restart_session = True
+            else:
+                # Don't know why we got this error - crash, die and look at logs later
+                raise
+        except OSError as e:
+            # This happens if we send messages or
+            # read other-config:perfmon after xapi is restarted
+            log_err("caught connection error: (%s) - restarting XAPI session" % str(e))
+            restart_session = True
+
+        runs += 1
+        if maxruns is not None and runs >= maxruns:
+            break
+
+        # Force collection of cyclically referenced objects because we don't
+        # trust the GC to do it on its own
+        gc.collect()
+
+        # Sleep for interval + dither, exiting early if we receive a cmd
+        timeout = rand(interval, interval + dither)
+        cmdsock.settimeout(timeout)
+        try:
+            cmd = cmdsock.recv(cmdmaxlen).decode()
+        except socket.timeout:
+            pass
+        else:
+            if cmd == "refresh":
+                # This forces a re-read of all the configs on the next loop
+                next_config_update = time.time()
+            elif cmd == "debug_mem":
+                debug_mem()
+            else:
+                log_err("received unhandled command %s" % cmd)
+
+        # continue to next run
+
+    return 0
+
+
+def sigterm_handler(sig, _):  # pragma: no cover
+    log_err("Caught signal %d - exiting" % sig)
+    sys.exit(1)
+
+
+pidfile = "/var/run/perfmon.pid"
+
+if __name__ == "__main__":  # pragma: no cover
+
+    # setup signal handler to print out a notice when killed
+    signal.signal(signal.SIGTERM, sigterm_handler)
+
+    if "--daemon" in sys.argv[1:]:
+        sys.argv.remove("--daemon")
+        if os.fork() != 0:
+            sys.exit(0)
+        os.setsid()
+        # For /dev/null, `encoding` and `with` are not needed
+        # pylint: disable=unspecified-encoding, consider-using-with
+        sys.stdout = open("/dev/null", "w")
+        sys.stdin = open("/dev/null", "r")
+        sys.stderr = sys.stdout
+
+    # Exit if perfmon is already running
+    if os.path.exists(pidfile):
+        with open(pidfile, encoding="utf-8") as file:
+            pid = file.read()
+
+        if os.path.exists("/proc/%s" % pid):
+            log_err("perfmon already running - exiting")
+            sys.exit(3)
+
+    try:
+        # Write out pidfile
+        with open(pidfile, "w", encoding="utf-8") as fd:
+            fd.write("%d" % os.getpid())
+
+        # run the main loop
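+        # (main() returns only after --numloops iterations; by default it loops forever)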
+        rc = main()
+
+    except UsageException:
+        # Print the usage
+        log_err(
+            "usage: %s [-i <interval> -n <loops> -d -s <rrd_step>" \
+            " -c <config_update_period> -D <interval_percent_dither>] \\\n"
+            "\t[--interval=<interval> --numloops=<loops> --debug \\\n"
+            "\t --rrdstep=<rrd_step> --daemon]\n"
+            "\t --config_update_period=<config_update_period>\n"
+            "\t --interval_percent_dither=<interval_percent_dither>\n"
+            " interval:\tseconds between reads of http://localhost/rrd_updates?...\n"
+            " loops:\tnumber of times to run before exiting\n"
+            " rrd_step:\tseconds between samples provided by rrd_updates." \
+            " Valid values are 5 or 60\n"
+            " config_update_period:\tseconds between getting updates" \
+            " of all VM/host records from master\n"
+            " interval_percent_dither:\tmax percent dither in each loop" \
+            " - prevents stampede on master\n"
+            % (sys.argv[0])
+        )
+        rc = 1
+
+    except SystemExit:
+        # we caught a signal which we have already logged
+        pass
+
+    except Exception as exp:
+        rc = 2
+        log_err("FATAL ERROR: perfmon will exit")
+        log_err("Exception is of class %s" % exp.__class__)
+        ex = sys.exc_info()
+        err = traceback.format_exception(*ex)
+
+        # XenAPI.Failure has `details`.
+        try:
+            # print the exception args nicely
+            log_err(str(exp))
+        except Exception:
+            try:
+                err_msg = "\n".join([str(x) for x in exp.details])
+                # print the exception args nicely
+                log_err(err_msg)
+            except Exception:
+                pass
+
+        # now log the traceback to syslog
+        for exline in err:
+            log_err(exline)
+
+    # remove pidfile and exit
+    os.unlink(pidfile)
+    sys.exit(rc)
diff --git a/scripts/static-vdis b/python3/bin/static-vdis
similarity index 90%
rename from scripts/static-vdis
rename to python3/bin/static-vdis
index 77c9790b71e..ff3a01da596 100755
--- a/scripts/static-vdis
+++ b/python3/bin/static-vdis
@@ -3,10 +3,22 @@
 
 # Common functions for managing statically-attached (ie onboot, without xapi) VDIs
 
-import sys, os, subprocess, json, urllib.parse
+import json
+import os
 import os.path
+import subprocess
+import sys
 import time
-import XenAPI, inventory, xmlrpc.client
+import urllib.parse
+import xmlrpc.client
+from typing import TYPE_CHECKING, cast
+
+import XenAPI
+
+import inventory
+
+if TYPE_CHECKING:
+    from typing import Any, Dict
 
 main_dir = "/etc/xensource/static-vdis"
 
@@ -77,6 +89,7 @@ def check_clusterstack(ty):
         wait_for_corosync_quorum()
 
 def sr_attach(ty, device_config):
+    # type: (str, Dict[str, object]) -> str
     check_clusterstack(ty)
 
     args = [arg for (k,v) in device_config.items()
@@ -84,25 +97,25 @@ def sr_attach(ty, device_config):
     return call_volume_plugin(ty, "SR.attach", args)
 
 def list_vdis():
-    all = []
+    files = []
     try:
-        all = os.listdir(main_dir)
-    except:
+        files = os.listdir(main_dir)
+    except OSError: # All possible errors are subclasses of OSError
         pass
-    return list(map(load, all))
+    return list(map(load, files))
 
 def fresh_name():
-    all = []
+    """Return a unique name for a new static VDI configuration directory"""
     try:
-        all = os.listdir(main_dir)
-        for i in range(0, len(all) + 1): # guarantees to find a unique number
+        files = os.listdir(main_dir)
+        for i in range(0, len(files) + 1): # guarantees to find a unique number
             i = str(i)
-            if not(i in all):
+            if i not in files:
                 return i
-    except:
+    except OSError: # All possible errors are subclasses of OSError
         # Directory doesn't exist
         os.mkdir(main_dir)
-        return "0"
+    return "0" # Always return a string, fixes pyright error by not returning None
 
 def to_string_list(d):
@@ -153,6 +166,7 @@ def add(session, vdi_uuid, reason):
     sm = None
     all_sm = session.xenapi.SM.get_all_records()
+    sm_ref = "" # pragma: no cover
     for sm_ref in all_sm:
         if all_sm[sm_ref]['type'] == ty:
             sm = all_sm[sm_ref]
@@ -170,6 +184,7 
@@ def add(session, vdi_uuid, reason): if "VDI_ATTACH_OFFLINE" in sm["features"]: data["volume-plugin"] = ty data[smapiv3_config] = json.dumps(device_config) + assert device_config # must not be None sr = sr_attach(ty, device_config) location = session.xenapi.VDI.get_location(vdi) stat = call_volume_plugin(ty, "Volume.stat", [ sr, location ]) @@ -225,7 +240,7 @@ def call_backend_attach(driver, config): xml = doexec(args) if xml[0] != 0: raise Exception("SM_BACKEND_FAILURE(%d, %s, %s)" % xml) - xml_rpc = xmlrpc.client.loads(xml[1]) + xml_rpc = xmlrpc.client.loads(xml[1]) # type: Any # pragma: no cover if 'params_nbd' in xml_rpc[0][0]: # Prefer NBD if available @@ -238,7 +253,7 @@ def call_backend_attach(driver, config): return path def call_backend_detach(driver, config): - params = xmlrpc.client.loads(config)[0][0] + params = xmlrpc.client.loads(config)[0][0] # type: Any params['command'] = 'vdi_detach_from_config' config = xmlrpc.client.dumps(tuple([params]), params['command']) xml = doexec([ driver, config ]) @@ -246,8 +261,8 @@ def call_backend_detach(driver, config): raise Exception("SM_BACKEND_FAILURE(%d, %s, %s)" % xml) xml_rpc = xmlrpc.client.loads(xml[1]) try: - res = xml_rpc[0][0]['params'] - except: + res = cast(dict, xml_rpc[0][0])['params'] # pragma: no cover + except Exception: res = xml_rpc[0][0] return res @@ -288,7 +303,7 @@ def attach(vdi_uuid): os.unlink(d + "/disk") except: pass - path = None + path = None # Raise TypeError if path is not set at the end if not (os.path.exists(d + "/" + smapiv3_config)): # SMAPIv1 config = read_whole_file(d + "/config") @@ -320,10 +335,13 @@ def attach(vdi_uuid): (path, exportname) = parse_nbd_uri(uri) path = connect_nbd(path=path, exportname=exportname) + if path is None: + raise TypeError("static-vdis: attach(): path was not set") os.symlink(path, d + "/disk") return d + "/disk" if not found: raise Exception("Disk configuration not found") + return None def detach(vdi_uuid): found = False @@ -362,8 +380,9 @@ def usage(): print(" %s attach -- attach the VDI immediately" % sys.argv[0]) print(" %s detach -- detach the VDI immediately" % sys.argv[0]) sys.exit(1) - -if __name__ == "__main__": + + +def main(): if len(sys.argv) < 2: usage() @@ -382,10 +401,13 @@ if __name__ == "__main__": elif sys.argv[1] == "del" and len(sys.argv) == 3: delete(sys.argv[2]) elif sys.argv[1] == "attach" and len(sys.argv) == 3: - path = attach(sys.argv[2]) - print(path) + disk_path = attach(sys.argv[2]) + print(disk_path) elif sys.argv[1] == "detach" and len(sys.argv) == 3: detach(sys.argv[2]) else: usage() - + + +if __name__ == "__main__": # pragma: no cover + main() diff --git a/python3/bin/xe-reset-networking b/python3/bin/xe-reset-networking new file mode 100755 index 00000000000..c1c3e38d283 --- /dev/null +++ b/python3/bin/xe-reset-networking @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 + +""" +Copyright (C) 2006-2009 Citrix Systems Inc. +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published +by the Free Software Foundation; version 2.1 only. with the special +exception on linking described in file LICENSE. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Lesser General Public License for more details. 
+""" +from __future__ import print_function + +import os +import re +import sys +from contextlib import contextmanager +from optparse import OptionParser + +pool_conf = '@ETCXENDIR@/pool.conf' +inventory_file = '@INVENTORY@' +management_conf = '/etc/firstboot.d/data/management.conf' +network_reset = '/var/tmp/network-reset' + + +@contextmanager +def fsync_write(filename): + """Context manager that writes to a file and fsyncs it after writing.""" + + with open(filename, "w", encoding="utf-8") as file: + try: # Run the context, ignoring exceptions: + yield file + finally: + file.flush() # Flush the file buffer to the OS + os.fsync(file.fileno()) # Ask the OS to write the file to disk + + +def read_dict_file(fname): + f = open(fname, 'r') + d = {} + for l in f.readlines(): + kv = l.split('=') + d[kv[0].strip()] = kv[1].strip().strip("'") + return d + +def read_inventory(): + return read_dict_file(inventory_file) + +def read_management_conf(): + return read_dict_file(management_conf) + + +def write_inventory(inventory_dict): + with fsync_write(inventory_file) as file: + for k in inventory_dict: + file.write(k + "='" + inventory_dict[k] + "'\n") + + +def valid_vlan(vlan): + if not re.match(r"^\d+$", vlan): + return False + if int(vlan)<0 or int(vlan)>4094: + return False + return True + +if __name__ == "__main__": + parser = OptionParser() + parser.add_option("-m", "--master", help="Master's address", dest="address", default=None) + parser.add_option("--device", help="Device name of new management interface", dest="device", default=None) + parser.add_option("--mode", help='IP configuration mode for new management interface: "none", "dhcp" or "static" (default is dhcp)', dest="mode", default="dhcp") + parser.add_option("--mode-v6", help='IPv6 configuration mode for new management interface: "none", "dhcp", "autoconf" or "static" (default is none)', dest="mode_v6", default="none") + parser.add_option("--novlan", help="no vlan is used for new management interface", dest="novlan", action="store_const", const=True, default=False) + parser.add_option("--vlan", help="vlanID for new management interface to be on vlan network", dest="vlan", default=None) + parser.add_option("--ip", help="IP address for new management interface", dest="ip", default='') + parser.add_option("--ipv6", help="IPv6 address (CIDR format) for new management interface", dest="ipv6", default='') + parser.add_option("--netmask", help="Netmask for new management interface", dest="netmask", default='') + parser.add_option("--gateway", help="Gateway for new management interface", dest="gateway", default='') + parser.add_option("--gateway-v6", help="IPv6 Gateway for new management interface", dest="gateway_v6", default='') + parser.add_option("--dns", help="DNS server for new management interface", dest="dns", default='') + (options, args) = parser.parse_args() + + # Determine pool role + try: + f = open(pool_conf, 'r') + try: + l = f.readline() + ls = l.split(':') + if ls[0].strip() == 'master': + master = True + address = 'localhost' + else: + master = False + if options.address == None: + address = ls[1].strip() + else: + address = options.address + finally: + f.close() + except Exception: + master = None + address = "" + + # Get the management device from the firstboot data if not specified by the user + if options.device == None: + try: + conf = read_management_conf() + device = conf['LABEL'] + except: + print("Could not figure out which interface should become the management interface. 
\
+ Please specify one using the --device option.")
+            sys.exit(1)
+    else:
+        device = options.device
+
+    # Get the VLAN if provided in the firstboot data and not specified by the user
+    vlan = None
+    if options.vlan:
+        if options.novlan:
+            parser.error('"--vlan <vlan>" and "--novlan" should not be used together')
+            sys.exit(1)
+        if not valid_vlan(options.vlan):
+            print("The VLAN tag you gave was invalid; it must be between 0 and 4094")
+            sys.exit(1)
+        vlan = options.vlan
+    elif not options.novlan:
+        try:
+            conf = read_management_conf()
+            vlan = conf['VLAN']
+        except KeyError:
+            pass
+
+    # Determine IP configuration for management interface
+    options.mode = options.mode.lower()
+    if options.mode not in ["none", "dhcp", "static"]:
+        parser.error('mode should be either "none", "dhcp" or "static"')
+        sys.exit(1)
+
+    options.mode_v6 = options.mode_v6.lower()
+    if options.mode_v6 not in ["none", "autoconf", "dhcp", "static"]:
+        parser.error('mode-v6 should be either "none", "autoconf", "dhcp" or "static"')
+        sys.exit(1)
+
+    if options.mode == "none" and options.mode_v6 == "none":
+        parser.error("At least one of mode and mode-v6 must not be 'none'")
+        sys.exit(1)
+
+    if options.mode == 'static' and (options.ip == '' or options.netmask == ''):
+        parser.error("if static IP mode is selected, an IP address and netmask need to be specified")
+        sys.exit(1)
+
+    if options.mode_v6 == 'static':
+        if options.ipv6 == '':
+            parser.error("if static IPv6 mode is selected, an IPv6 address needs to be specified")
+        elif options.ipv6.find('/') == -1:
+            parser.error("Invalid format: IPv6 must be specified in CIDR format: <ipv6>/<prefix>")
+            sys.exit(1)
+
+    # Warn user
+    if not os.access('/tmp/fist_network_reset_no_warning', os.F_OK):
+        configuration = []
+        configuration.append("Management interface: " + device)
+        configuration.append("IP configuration mode: " + options.mode)
+        configuration.append("IPv6 configuration mode: " + options.mode_v6)
+        if vlan != None:
+            configuration.append("Vlan: " + vlan)
+        if options.mode == "static":
+            configuration.append("IP address: " + options.ip)
+            configuration.append("Netmask: " + options.netmask)
+        if options.mode_v6 == "static":
+            configuration.append("IPv6/CIDR: " + options.ipv6)
+        if options.gateway != '':
+            configuration.append("Gateway: " + options.gateway)
+        if options.gateway_v6 != '':
+            configuration.append("IPv6 gateway: " + options.gateway_v6)
+        if options.dns != '':
+            configuration.append("DNS server(s): " + options.dns)
+        if master == False:
+            configuration.append("Pool master's address: " + address)
+        warning = """----------------------------------------------------------------------
+!! WARNING !!
+
+This command will reboot the host and reset its network configuration.
+Any running VMs will be forcefully shut down.
+
+Before completing this command:
+- Where possible, cleanly shutdown all VMs running on this host.
+- Disable HA if this host is part of a resource pool with HA enabled.
+----------------------------------------------------------------------
+
+Your network will be re-configured as follows:\n\n"""
+        confirmation = """\n\nIf you want to change any of the above settings, type 'no' and re-run
+the command with appropriate arguments (use --help for a list of options).
+
+Type 'yes' to continue.
+Type 'no' to cancel.
+""" + res = input(warning + '\n'.join(configuration) + confirmation) + if res != 'yes': + sys.exit(1) + + # Update master's IP, if needed and given + if master == False and options.address != None: + print("Setting master's ip (" + address + ")...") + with fsync_write(pool_conf) as f: + f.write('slave:' + address) + + # Construct bridge name for management interface based on convention + if device[:3] == 'eth': + bridge = 'xenbr' + device[3:] + else: + bridge = 'br' + device + + # Ensure xapi is not running + print("Stopping xapi...") + os.system('service xapi stop >/dev/null 2>/dev/null') + + # Reconfigure new management interface + print("Reconfiguring " + device + "...") + os.system('systemctl stop xcp-networkd >/dev/null 2>/dev/null') + try: + os.remove('/var/lib/xcp/networkd.db') + except Exception as e: + print('Warning: Failed to delete networkd.db.\n%s' % e) + + # Update interfaces in inventory file + print('Updating inventory file...') + inventory = read_inventory() + if vlan != None: + inventory['MANAGEMENT_INTERFACE'] = 'xentemp' + else: + inventory['MANAGEMENT_INTERFACE'] = bridge + inventory['CURRENT_INTERFACES'] = '' + write_inventory(inventory) + + # Rewrite firstboot management.conf file, which will be picked it by xcp-networkd on restart (if used) + is_static = False + with fsync_write(management_conf) as f: + f.write("LABEL='" + device + "'\n") + if options.mode != "none": + f.write("MODE='" + options.mode + "'\n") + if options.mode_v6 != "none": + f.write("MODEV6='" + options.mode_v6 + "'\n") + if vlan != None: + f.write("VLAN='" + vlan + "'\n") + if options.mode == 'static': + is_static = True + f.write("IP='" + options.ip + "'\n") + f.write("NETMASK='" + options.netmask + "'\n") + if options.gateway != '': + f.write("GATEWAY='" + options.gateway + "'\n") + if options.mode_v6 == "static": + is_static = True + f.write("IPv6='" + options.ipv6 + "'\n") + if options.gateway_v6 != '': + f.write("IPv6_GATEWAY='" + options.gateway_v6 + "'\n") + if is_static and options.dns != '': + f.write("DNS='" + options.dns + "'\n") + + # Write trigger file for XAPI to continue the network reset on startup + with fsync_write(network_reset) as f: + f.write('DEVICE=' + device + '\n') + if options.mode != "none": + f.write('MODE=' + options.mode + '\n') + if options.mode_v6 != "none": + f.write('MODE_V6=' + options.mode_v6 + '\n') + if vlan != None: + f.write('VLAN=' + vlan + '\n') + if options.mode == 'static': + f.write('IP=' + options.ip + '\n') + f.write('NETMASK=' + options.netmask + '\n') + if options.gateway != '': + f.write('GATEWAY=' + options.gateway + '\n') + if options.mode_v6 == "static": + f.write('IPV6=' + options.ipv6 + '\n') + if options.gateway_v6 != '': + f.write('GATEWAY_V6=' + options.gateway_v6 + '\n') + if is_static and options.dns != '': + f.write('DNS=' + options.dns + '\n') + + # Reset the domain 0 network interface naming configuration + # back to a fresh-install state for the currently-installed + # hardware. 
+ os.system("/etc/sysconfig/network-scripts/interface-rename.py --reset-to-install") + + # Reboot + os.system("mount -o remount,rw / && reboot -f") diff --git a/scripts/xe-scsi-dev-map b/python3/bin/xe-scsi-dev-map similarity index 100% rename from scripts/xe-scsi-dev-map rename to python3/bin/xe-scsi-dev-map diff --git a/python3/dnf_plugins/ptoken.py b/python3/dnf_plugins/ptoken.py index 75c926e13b4..c2ea73fccc8 100644 --- a/python3/dnf_plugins/ptoken.py +++ b/python3/dnf_plugins/ptoken.py @@ -15,7 +15,7 @@ def config(self): """ DNF plugin config hook, refer to https://dnf.readthedocs.io/en/latest/api_plugins.html""" try: - with open('/etc/xensource/ptoken', encoding="utf-8") as file: + with open(PTOKEN_PATH, encoding="utf-8") as file: ptoken = file.read().strip() except Exception: #pylint: disable=broad-exception-caught logging.error("Failed to open %s", PTOKEN_PATH) diff --git a/scripts/examples/python/Makefile b/python3/examples/Makefile similarity index 95% rename from scripts/examples/python/Makefile rename to python3/examples/Makefile index 251f747250d..ac84bf6ba77 100644 --- a/scripts/examples/python/Makefile +++ b/python3/examples/Makefile @@ -8,5 +8,4 @@ build: SETUPTOOLS_SCM_PRETEND_VERSION=$(XAPI_VERSION) python -m build --sdist . clean: - dune clean rm -rf dist/ build/ XenAPI.egg-info/ diff --git a/scripts/examples/python/PACKAGING.md b/python3/examples/PACKAGING.md similarity index 100% rename from scripts/examples/python/PACKAGING.md rename to python3/examples/PACKAGING.md diff --git a/scripts/examples/python/README.md b/python3/examples/README.md similarity index 100% rename from scripts/examples/python/README.md rename to python3/examples/README.md diff --git a/scripts/examples/python/XenAPI/XenAPI.py b/python3/examples/XenAPI/XenAPI.py similarity index 94% rename from scripts/examples/python/XenAPI/XenAPI.py rename to python3/examples/XenAPI/XenAPI.py index bc4e6179142..e37f8813b6e 100644 --- a/scripts/examples/python/XenAPI/XenAPI.py +++ b/python3/examples/XenAPI/XenAPI.py @@ -58,13 +58,8 @@ import os import socket import sys - -if sys.version_info[0] == 2: - import httplib as httplib - import xmlrpclib as xmlrpclib -else: - import http.client as httplib - import xmlrpc.client as xmlrpclib +import http.client as httplib +import xmlrpc.client as xmlrpclib otel = False try: @@ -147,18 +142,13 @@ class Session(xmlrpclib.ServerProxy): session.xenapi.session.logout() """ - def __init__(self, uri, transport=None, encoding=None, verbose=0, - allow_none=1, ignore_ssl=False): + def __init__(self, uri, transport=None, encoding=None, verbose=False, + allow_none=True, ignore_ssl=False): - if sys.version_info[0] > 2: - # this changed to be a 'bool' in Python3 - verbose = bool(verbose) - allow_none = bool(allow_none) + verbose = bool(verbose) + allow_none = bool(allow_none) - # Fix for CA-172901 (+ Python 2.4 compatibility) - # Fix for context=ctx ( < Python 2.7.9 compatibility) - if not (sys.version_info[0] <= 2 and sys.version_info[1] <= 7 and sys.version_info[2] <= 9 ) \ - and ignore_ssl: + if ignore_ssl: import ssl ctx = ssl._create_unverified_context() xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, diff --git a/scripts/examples/python/XenAPI/__init__.py b/python3/examples/XenAPI/__init__.py similarity index 100% rename from scripts/examples/python/XenAPI/__init__.py rename to python3/examples/XenAPI/__init__.py diff --git a/scripts/examples/python/XenAPIPlugin.py b/python3/examples/XenAPIPlugin.py similarity index 82% rename from 
scripts/examples/python/XenAPIPlugin.py rename to python3/examples/XenAPIPlugin.py index 87d8c23c12b..49998457783 100644 --- a/scripts/examples/python/XenAPIPlugin.py +++ b/python3/examples/XenAPIPlugin.py @@ -7,16 +7,11 @@ from __future__ import print_function import sys - +import xmlrpc.client as xmlrpclib import XenAPI -if sys.version_info[0] == 2: - import xmlrpclib -else: - import xmlrpc.client as xmlrpclib - class Failure(Exception): - """Provide compatibilty with plugins written against XenServer 5.5 API""" + """Provide compatibility with plugins written against the XenServer 5.5 API""" def __init__(self, code, params): Exception.__init__(self) @@ -44,9 +39,6 @@ def dispatch(fn_table): try: result = fn_table[methodname](x, args) print(success_message(result)) - except SystemExit: - # SystemExit should not be caught, as it is handled elsewhere in the plugin system. - raise except Failure as e: print(failure_message(e.params)) except Exception as e: diff --git a/scripts/examples/python/pyproject.toml b/python3/examples/pyproject.toml similarity index 89% rename from scripts/examples/python/pyproject.toml rename to python3/examples/pyproject.toml index f556f2539ab..5a429e1a0c7 100644 --- a/scripts/examples/python/pyproject.toml +++ b/python3/examples/pyproject.toml @@ -3,4 +3,4 @@ requires = ["setuptools >= 38.6.0", "setuptools_scm[toml]", "wheel"] build-backend = "setuptools.build_meta" [tool.setuptools_scm] -root = "../../.." +root = "../.." diff --git a/scripts/examples/python/setup.cfg b/python3/examples/setup.cfg similarity index 69% rename from scripts/examples/python/setup.cfg rename to python3/examples/setup.cfg index 059e6631bd1..b2c23c40369 100644 --- a/scripts/examples/python/setup.cfg +++ b/python3/examples/setup.cfg @@ -19,10 +19,4 @@ classifiers = [options] packages = find: -python_requires = >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4 - -[bdist_wheel] -# This flag says that the code is written to work on both Python 2 and Python -# 3. If at all possible, it is good practice to do this. If you cannot, you -# will need to generate wheels for each Python version that you support. 
-universal=1 +python_requires = >=3.6, <4 diff --git a/python3/extensions/Test.test b/python3/extensions/Test.test new file mode 100755 index 00000000000..372de668b8c --- /dev/null +++ b/python3/extensions/Test.test @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + + +import sys +import xmlrpc.client + + +def success_message(result): + rpcparams = {"Status": "Success", "Value": result} + return xmlrpc.client.dumps((rpcparams,), "", True) + + +def failure_message(code, params): + rpcparams = {"Status": "Failure", "ErrorDescription": [code] + params} + return xmlrpc.client.dumps((rpcparams,), "", True) + + +if __name__ == "__main__": + txt = sys.stdin.read() + req = xmlrpc.client.loads(txt) + print(failure_message("CODE", ["a", "b"])) + # print (success_message("")) diff --git a/scripts/extensions/pool_update.apply b/python3/extensions/pool_update.apply similarity index 80% rename from scripts/extensions/pool_update.apply rename to python3/extensions/pool_update.apply index c860d965169..96d00adcf32 100644 --- a/scripts/extensions/pool_update.apply +++ b/python3/extensions/pool_update.apply @@ -1,19 +1,29 @@ #!/usr/bin/env python3 -import xmlrpc.client -import sys -import XenAPI -import json -import traceback -import subprocess +import errno +import logging import os import re -import fasteners -import errno import shutil -import logging +import subprocess +import sys +import xmlrpc.client + +import fasteners import xcp.logger +import XenAPI + +TMP_DIR = "/tmp/" +UPDATE_ALREADY_APPLIED = "UPDATE_ALREADY_APPLIED" +UPDATE_APPLY_FAILED = "UPDATE_APPLY_FAILED" +OTHER_OPERATION_IN_PROGRESS = "OTHER_OPERATION_IN_PROGRESS" +UPDATE_PRECHECK_FAILED_UNKNOWN_ERROR = "UPDATE_PRECHECK_FAILED_UNKNOWN_ERROR" +CANNOT_FIND_UPDATE = "CANNOT_FIND_UPDATE" +INVALID_UPDATE = "INVALID_UPDATE" +ERROR_MESSAGE_DOWNLOAD_PACKAGE = "Error downloading packages:\n" +ERROR_MESSAGE_START = "Error: " +ERROR_MESSAGE_END = "You could try " TMP_DIR = '/tmp/' UPDATE_ALREADY_APPLIED = 'UPDATE_ALREADY_APPLIED' @@ -32,12 +42,15 @@ PKG_MGR = DNF_CMD if os.path.exists(DNF_CMD) else YUM_CMD class EnvironmentFailure(Exception): """Failure due to running environment""" + class ApplyFailure(Exception): """Failed to apply update""" + class InvalidUpdate(Exception): """Update is invalid""" + def success_message(): """success message to return""" rpcparams = {'Status': 'Success', 'Value': ''} @@ -55,7 +68,7 @@ def failure_message(code, args): def execute_apply(update_package, yum_conf_file): """apply update""" yum_env = os.environ.copy() - yum_env['LANG'] = 'C' + yum_env["LANG"] = "C" cmd = [PKG_MGR, 'clean', 'all', '--noplugins', '-c', yum_conf_file] # pylint: disable=consider-using-with @@ -63,7 +76,7 @@ def execute_apply(update_package, yum_conf_file): stderr=subprocess.STDOUT, close_fds=True, env=yum_env, universal_newlines=True) output, _ = p.communicate() - for line in output.split('\n'): + for line in output.split("\n"): xcp.logger.info(line) if p.returncode != 0: raise EnvironmentFailure("Error cleaning yum cache") @@ -75,22 +88,22 @@ def execute_apply(update_package, yum_conf_file): p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True, env=yum_env, universal_newlines=True) output, _ = p.communicate() - xcp.logger.info('pool_update.apply %r returncode=%r output:', cmd, p.returncode) - for line in output.split('\n'): + xcp.logger.info("pool_update.apply %r returncode=%r output:", cmd, p.returncode) + for line in output.split("\n"): xcp.logger.info(line) if p.returncode != 0: if 
ERROR_MESSAGE_DOWNLOAD_PACKAGE in output: - raise InvalidUpdate('Missing package(s) in the update.') + raise InvalidUpdate("Missing package(s) in the update.") - m = re.search('(?<=' + ERROR_MESSAGE_START + ').+$', output, flags=re.DOTALL) + m = re.search("(?<=" + ERROR_MESSAGE_START + ").+$", output, flags=re.DOTALL) if m: errmsg = m.group() - errmsg = re.sub(ERROR_MESSAGE_END + '.+', '', errmsg, flags=re.DOTALL) + errmsg = re.sub(ERROR_MESSAGE_END + ".+", "", errmsg, flags=re.DOTALL) raise ApplyFailure(errmsg) raise ApplyFailure(output) -if __name__ == '__main__': +if __name__ == "__main__": xcp.logger.logToSyslog(level=logging.INFO) txt = sys.stdin.read() params, method = xmlrpc.client.loads(txt) @@ -101,27 +114,29 @@ if __name__ == '__main__': lock_acquired = False try: session = XenAPI.xapi_local() - session.xenapi.login_with_password('root', '', '', 'Pool_update') + session.xenapi.login_with_password("root", "", "", "Pool_update") update = params[1] host = params[2] # Check if the update has been applied. if update in session.xenapi.host.get_updates(host): - print(failure_message( - UPDATE_ALREADY_APPLIED, [update])) + print(failure_message(UPDATE_ALREADY_APPLIED, [update])) sys.exit(0) update_uuid = session.xenapi.pool_update.get_uuid(update) - yum_conf_file = os.path.join(TMP_DIR, update_uuid, 'yum.conf') + yum_conf_file = os.path.join(TMP_DIR, update_uuid, "yum.conf") # To prevent the race condition of invoking apply, set a lock. - lock_file = os.path.join(TMP_DIR, update_uuid + '.lck') + lock_file = os.path.join(TMP_DIR, update_uuid + ".lck") lock = fasteners.InterProcessLock(lock_file) lock_acquired = lock.acquire(blocking=False) if not lock_acquired: - print(failure_message( - OTHER_OPERATION_IN_PROGRESS, ['Applying the update', update])) + print( + failure_message( + OTHER_OPERATION_IN_PROGRESS, ["Applying the update", update] + ) + ) sys.exit(0) # Run precheck @@ -154,7 +169,7 @@ if __name__ == '__main__': pass else: raise - with open (yum_conf_file, "w+") as file: + with open(yum_conf_file, "w+") as file: file.write("{0}".format(yum_conf)) execute_apply('@update', yum_conf_file) diff --git a/scripts/extensions/pool_update.precheck b/python3/extensions/pool_update.precheck similarity index 99% rename from scripts/extensions/pool_update.precheck rename to python3/extensions/pool_update.precheck index 57c6596f280..8e5f1858668 100755 --- a/scripts/extensions/pool_update.precheck +++ b/python3/extensions/pool_update.precheck @@ -1,23 +1,23 @@ #!/usr/bin/env python3 -import xmlrpc.client -import sys -import XenAPI -import json -import urllib.request, urllib.error, urllib.parse -import xml.dom.minidom -import traceback -import subprocess -import os +import configparser import errno -import re -import shutil import io -import configparser import logging -import xcp.logger +import os +import re +import shutil +import subprocess +import sys +import urllib.error +import urllib.parse +import urllib.request +import xml.dom.minidom +import xmlrpc.client +import xcp.logger +import XenAPI TMP_DIR = '/tmp/' UPDATE_DIR = '/var/update/' @@ -248,6 +248,7 @@ if __name__ == '__main__': session = None update_package = None update = None + yum_conf_file = "" try: session = XenAPI.xapi_local() session.xenapi.login_with_password('root', '', '', 'Pool_update') diff --git a/scripts/backup-sr-metadata.py b/python3/libexec/backup-sr-metadata.py similarity index 71% rename from scripts/backup-sr-metadata.py rename to python3/libexec/backup-sr-metadata.py index 2464d5c8761..8f83a9b06cb 100644 --- 
a/scripts/backup-sr-metadata.py +++ b/python3/libexec/backup-sr-metadata.py @@ -1,54 +1,58 @@ -#!/usr/bin/python +#!/usr/bin/python3 # Back up the SR metadata and VDI list into an XML file # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 import atexit -import XenAPI -import sys -import getopt import codecs -from xml.dom.minidom import Document +import contextlib +import getopt +import sys +from xml.dom.minidom import Document # pytype: disable=pyi-error + +import XenAPI -def logout(): - try: - session.xenapi.session.logout() - except: - pass -atexit.register(logout) def usage(): - print >> sys.stderr, "%s [-f ]" % sys.argv[0] - sys.exit(1) + print("%s [-f ]" % sys.argv[0], file=sys.stderr) + def set_if_exists(xml, record, key): - if record.has_key(key): + if key in record: xml.setAttribute(key, record[key]) else: xml.setAttribute(key, "") - + def main(argv): session = XenAPI.xapi_local() + + def logout(): + with contextlib.suppress(Exception): + session.xenapi.session.logout() + + atexit.register(logout) + session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-backup-sr-metadata") try: - opts, args = getopt.getopt(argv, "hf:", []) - except getopt.GetoptError, err: - print str(err) + opts, _ = getopt.getopt(argv, "hf:", []) + except getopt.GetoptError as err: + print(err) usage() + sys.exit(1) outfile = None for o,a in opts: if o == "-f": outfile = a - if outfile == None: + if outfile is None: usage() + sys.exit(1) f = codecs.open(outfile, 'w', encoding="utf-8") srs = session.xenapi.SR.get_all_records() - vdis = session.xenapi.SR.get_all_records() - + doc = Document() metaxml = doc.createElement("meta") @@ -60,18 +64,18 @@ def main(argv): set_if_exists(srxml, srrec, 'uuid') set_if_exists(srxml, srrec, 'name_label') set_if_exists(srxml, srrec, 'name_description') - + for vdiref in srrec['VDIs']: - try: + try: vdirec = session.xenapi.VDI.get_record(vdiref) vdixml = doc.createElement("vdi") set_if_exists(vdixml, vdirec, 'uuid') set_if_exists(vdixml, vdirec, 'name_label') set_if_exists(vdixml, vdirec, 'name_description') srxml.appendChild(vdixml) - except: - print >> sys.stderr, "Failed to get VDI record for: %s" % vdiref - + except Exception: + print("Failed to get VDI record for: %s" % vdiref, file=sys.stderr) + metaxml.appendChild(srxml) doc.writexml(f, encoding="utf-8") @@ -80,5 +84,3 @@ def main(argv): if __name__ == "__main__": main(sys.argv[1:]) - - diff --git a/scripts/host-display b/python3/libexec/host-display similarity index 100% rename from scripts/host-display rename to python3/libexec/host-display diff --git a/scripts/link-vms-by-sr.py b/python3/libexec/link-vms-by-sr.py similarity index 60% rename from scripts/link-vms-by-sr.py rename to python3/libexec/link-vms-by-sr.py index 98fcfa587ed..0df20b4bf97 100755 --- a/scripts/link-vms-by-sr.py +++ b/python3/libexec/link-vms-by-sr.py @@ -1,5 +1,21 @@ #!/usr/bin/env python3 -# Populate a directory of symlinks partitioning VMs by SR +""" +link-vms-by-sr.py - Populate the given input_directory with VM metadata files, + and create a directory structure of + symlinks to the metadata files, partitioning VMs by SR UUID. + +Usage: + link-vms-by-sr.py -d + +The script uses the XenAPI to get a list of VMs in each SR +and get the metadata for each VM, writing the metadata to the input_dir, +and creating symlink directories in the input_dir/by-sr directory. + +Below the input_dir, given by -d : +- In the /all/ directory, store all VM metadata files. 
+- In the /by-sr/ directory, create symlinks to the VM metadata files, + partitioned by a directory structure of SR UUIDs. +""" # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 from __future__ import print_function @@ -15,18 +31,21 @@ def logout(session): + """atexit handler to logout of the xapi session, ignoring any exceptions""" with contextlib.suppress(Exception): session.xenapi.session.logout() -def get_input_dir(): +def get_input_dir_from_argparser(): + """Parse command line arguments (-d input_dir) and return the input directory""" parser = argparse.ArgumentParser() parser.add_argument("-d", dest="input_dir", required=True, help="Specify the input directory") args = parser.parse_args() return args.input_dir -def get_vms_in_sr(session): +def get_vms_in_sr_from_xapi(session): + """Return a dictionary of SR UUIDs to VM UUIDs""" vms = session.xenapi.VM.get_all_records() vbds = session.xenapi.VBD.get_all_records() vdis = session.xenapi.VDI.get_all_records() @@ -67,13 +86,18 @@ def get_vms_in_sr(session): def main(): + """Main function to save VM metadata files and link them by SR UUID""" + + # Get a session for the local host, login and register a logout handler session = XenAPI.xapi_local() session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-linkvmsbysr.py") atexit.register(logout, session) - input_dir = get_input_dir() - vms_in_sr = get_vms_in_sr(session) + # Parse the input directory and get the VMs in each SR + input_dir = get_input_dir_from_argparser() + vms_in_sr = get_vms_in_sr_from_xapi(session) + # Create the directory structure and populate it with symlinks for sruuid in list(vms_in_sr.keys()): linkdir = "{}/by-sr/{}".format(input_dir, sruuid) if Path(linkdir).is_dir(): @@ -82,15 +106,15 @@ def main(): try: Path(linkdir).mkdir(parents=True) - except: + except OSError: print("Failed to create directory: %s" % linkdir, file=sys.stderr) for vmuuid in list(vms_in_sr[sruuid].keys()): + src = "../../all/{}.vmmeta".format(vmuuid) + targ = "{}/{}.vmmeta".format(linkdir, vmuuid) try: - src = "../../all/{}.vmmeta".format(vmuuid) - targ = "{}/{}.vmmeta".format(linkdir, vmuuid) os.symlink(src, targ) - except: + except OSError: print("Failed to create symlink: %s -> %s" % (src, targ), file=sys.stderr) session.xenapi.logout() diff --git a/scripts/mail-alarm b/python3/libexec/mail-alarm similarity index 94% rename from scripts/mail-alarm rename to python3/libexec/mail-alarm index 99be5c44de8..aab40edc46a 100755 --- a/scripts/mail-alarm +++ b/python3/libexec/mail-alarm @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # mail-alarm: uses ssmtp to send a mail message, to pool:other_config:mail-destination # @@ -11,18 +11,19 @@ # the only thing that needs be set is pool:other-config:ssmtp-mailhub from __future__ import print_function -import XenAPI -import sys + +import json import os +import re +import subprocess +import sys +import syslog import tempfile import traceback -import syslog -import json -import re -from xml.dom import minidom -from xml.sax.saxutils import unescape -from xml.parsers.expat import ExpatError from socket import getfqdn +from xml.dom import minidom # pytype: disable=pyi-error + +import XenAPI from xcp import branding # Go read man ssmtp.conf @@ -107,18 +108,22 @@ def get_mail_language(other_config): def get_config_file(): try: - return open("/etc/mail-alarm.conf").read() + with open("/etc/mail-alarm.conf", "r") as file: + return file.read() except: return default_config def load_mail_language(mail_language): + mail_language_file = "" try: 
mail_language_file = os.path.join( mail_language_pack_path, mail_language + ".json" ) - with open(mail_language_file, "r") as fileh: - return json.load(fileh, encoding="utf-8") + + with open(mail_language_file, encoding="utf-8") as fileh: + return json.load(fileh) + except IOError: log_err('Read mail language pack error:["%s"]' % (mail_language_file)) return None @@ -241,7 +246,9 @@ class CpuUsageAlarmETG(EmailTextGenerator): period="%d" % self.alarm_trigger_period, level="%.1f" % (self.alarm_trigger_level * 100.0), brand_console=branding.BRAND_CONSOLE, - cls_name=(self.cls == "Host" or self.params["is_control_domain"]) and "Server" or "VM", + cls_name=(self.cls == "Host" or self.params["is_control_domain"]) + and "Server" + or "VM", ) @@ -365,7 +372,9 @@ class MemoryUsageAlarmETG(EmailTextGenerator): period="%d" % self.alarm_trigger_period, level="%d" % self.alarm_trigger_level, brand_console=branding.BRAND_CONSOLE, - cls_name=(self.cls == "Host" or self.params["is_control_domain"]) and "Server" or "VM", + cls_name=(self.cls == "Host" or self.params["is_control_domain"]) + and "Server" + or "VM", ) @@ -723,7 +732,10 @@ class XapiMessage: xmldoc = minidom.parseString(xml) def get_text(tag): - return xmldoc.getElementsByTagName(tag)[0].firstChild.toxml() + text = xmldoc.getElementsByTagName(tag)[0].firstChild + if text is None: + raise ValueError("Get text failed with tag <{}>".format(tag)) + return text.toxml() self.name = get_text("name") self.priority = get_text("priority") @@ -797,7 +809,6 @@ class XapiMessage: return self.cached_etg if self.name == "ALARM": - ( value, name, @@ -827,8 +838,10 @@ class XapiMessage: self.mail_language, self.session, ) - elif name in ["memory_free_kib", # for Host - "memory_internal_free"]: # for VM + elif name in [ + "memory_free_kib", # for Host + "memory_internal_free", # for VM + ]: etg = MemoryUsageAlarmETG( self.cls, self.obj_uuid, @@ -875,7 +888,7 @@ class XapiMessage: self.mail_language, self.session, ) - elif re.match("sr_io_throughput_total_[0-9a-f]{8}$", name): + elif name and re.match("sr_io_throughput_total_[0-9a-f]{8}$", name): etg = SRIOThroughputTotalAlertETG( self.cls, self.obj_uuid, @@ -980,7 +993,7 @@ def main(): 'Expected at least 1 argument but got none: ["%s"].' 
% (" ".join(sys.argv)) ) raise Exception("Insufficient arguments") - + session = XenAPI.xapi_local() ma_username = "__dom0__mail_alarm" session.xenapi.login_with_password( @@ -988,8 +1001,6 @@ def main(): ) try: - - other_config = get_pool_other_config(session) if "mail-min-priority" in other_config: min_priority = int(other_config["mail-min-priority"]) @@ -1015,36 +1026,44 @@ def main(): return 1 if not sender: - sender = "noreply@%s" % getfqdn().encode(charset) + sender = "noreply@%s" % getfqdn() # Replace macros in config file using search_replace list for s, r in search_replace: config = config.replace(s, r) # Write out a temporary file containing the new config - fd, fname = tempfile.mkstemp(prefix="mail-", dir="/tmp") + temp_file_path = "" try: - os.write(fd, config) - os.close(fd) + with tempfile.NamedTemporaryFile( + prefix="mail-", dir="/tmp", delete=False + ) as temp_file: + temp_file.write(config.encode()) + temp_file_path = temp_file.name # Run ssmtp to send mail - chld_stdin, chld_stdout = os.popen2( - ["/usr/sbin/ssmtp", "-C%s" % fname, destination] - ) - chld_stdin.write("From: %s\n" % sender) - chld_stdin.write('Content-Type: text/plain; charset="%s"\n' % charset) - chld_stdin.write("To: %s\n" % destination.encode(charset)) - chld_stdin.write( - "Subject: %s\n" % msg.generate_email_subject().encode(charset) - ) - chld_stdin.write("\n") - chld_stdin.write(msg.generate_email_body().encode(charset)) - chld_stdin.close() - chld_stdout.close() - os.wait() - + with subprocess.Popen( + ["/usr/sbin/ssmtp", "-C%s" % temp_file_path, destination], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + ) as proc: + input_data = ( + "From: %s\n" + 'Content-Type: text/plain; charset="%s"\n' + "To: %s\n" + "Subject: %s\n" + "\n" + "%s" + ) % ( + sender, + charset, + destination, + msg.generate_email_subject(), + msg.generate_email_body(), + ) + proc.communicate(input=input_data.encode(charset)) finally: - os.unlink(fname) + os.remove(temp_file_path) finally: session.xenapi.session.logout() diff --git a/scripts/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py similarity index 66% rename from scripts/nbd_client_manager.py rename to python3/libexec/nbd_client_manager.py index bebe97a2587..d0655df9756 100644 --- a/scripts/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 """ Provides functions and a CLI for safely connecting to and disconnecting from @@ -6,39 +6,52 @@ """ import argparse +import fcntl +import json import logging import logging.handlers import os +import re import subprocess import time -import fcntl -import json -import re from datetime import datetime, timedelta - LOGGER = logging.getLogger("nbd_client_manager") LOGGER.setLevel(logging.DEBUG) -LOCK_FILE = '/var/run/nonpersistent/nbd_client_manager' +LOCK_FILE = "/var/run/nonpersistent/nbd_client_manager" # Don't wait more than 10 minutes for the NBD device MAX_DEVICE_WAIT_MINUTES = 10 +class InvalidNbdDevName(Exception): + """ + The NBD device should be in this format: nbd{0-1000} + If we cannot match this pattern, raise this exception + """ + +class NbdConnStateTimeout(Exception): + """ + If we cannot get the connection status of a nbd device, + raise this exception. + """ + class NbdDeviceNotFound(Exception): """ The NBD device file does not exist. Raised when there are no free NBD devices. 
""" + def __init__(self, nbd_device): - super(NbdDeviceNotFound, self).__init__( - "NBD device '{}' does not exist".format(nbd_device)) + super().__init__( + "NBD device '{}' does not exist".format(nbd_device) + ) self.nbd_device = nbd_device - -class FileLock(object): +class FileLock: # pragma: no cover """Container for data relating to a file lock""" + def __init__(self, path): self._path = path self._lock_file = None @@ -46,7 +59,8 @@ def __init__(self, path): def _lock(self): """Acquire the lock""" flags = fcntl.LOCK_EX - self._lock_file = open(self._path, 'w+') + # pylint: disable=consider-using-with + self._lock_file = open(self._path, "w+", encoding="utf8") fcntl.flock(self._lock_file, flags) def _unlock(self): @@ -72,26 +86,22 @@ def _call(cmd_args, error=True): If [error] and exit code != 0, log and throws a CalledProcessError. """ LOGGER.debug("Running cmd %s", cmd_args) + # pylint: disable=consider-using-with proc = subprocess.Popen( - cmd_args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True + cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, + universal_newlines=True ) - stdout, stderr = proc.communicate() + _, stderr = proc.communicate() if error and proc.returncode != 0: LOGGER.error( - "%s exitted with code %d: %s", - ' '.join(cmd_args), - proc.returncode, - stderr) + "%s exited with code %d: %s", " ".join(cmd_args), proc.returncode, stderr + ) raise subprocess.CalledProcessError( - returncode=proc.returncode, - cmd=cmd_args, - output=stderr) + returncode=proc.returncode, cmd=cmd_args, output=stderr + ) return proc.returncode @@ -105,7 +115,7 @@ def _is_nbd_device_connected(nbd_device): # 1 for a non-existent file. if not os.path.exists(nbd_device): raise NbdDeviceNotFound(nbd_device) - cmd = ['nbd-client', '-check', nbd_device] + cmd = ["nbd-client", "-check", nbd_device] returncode = _call(cmd, error=False) if returncode == 0: return True @@ -125,39 +135,50 @@ def _find_unused_nbd_device(): if not _is_nbd_device_connected(nbd_device=nbd_device): return nbd_device + # If there are 1000 nbd devices (unlikely) and all are connected + raise NbdDeviceNotFound(nbd_device) # pyright:ignore[reportPossiblyUnboundVariable] def _wait_for_nbd_device(nbd_device, connected): deadline = datetime.now() + timedelta(minutes=MAX_DEVICE_WAIT_MINUTES) while _is_nbd_device_connected(nbd_device=nbd_device) != connected: if datetime.now() > deadline: - raise Exception( + raise NbdConnStateTimeout( "Timed out waiting for connection state of device %s to be %s" - % (nbd_device, connected)) + % (nbd_device, connected) + ) LOGGER.debug( - 'Connection status of NBD device %s not yet %s, waiting', + "Connection status of NBD device %s not yet %s, waiting", nbd_device, - connected) + connected, + ) time.sleep(0.1) + PERSISTENT_INFO_DIR = "/var/run/nonpersistent/nbd" + def _get_persistent_connect_info_filename(device): """ Return the full path for the persistent file containing the connection details. 
This is based on the device name, so /dev/nbd0 -> /var/run/nonpersistent/nbd/0 """ - number = re.search('/dev/nbd([0-9]+)', device).group(1) - return PERSISTENT_INFO_DIR + '/' + number + matched = re.search("/dev/nbd([0-9]+)", device) + if not matched: + raise InvalidNbdDevName("Can not get the nbd number") + number = matched.group(1) + return PERSISTENT_INFO_DIR + "/" + number + def _persist_connect_info(device, path, exportname): if not os.path.exists(PERSISTENT_INFO_DIR): os.makedirs(PERSISTENT_INFO_DIR) filename = _get_persistent_connect_info_filename(device) - with open(filename, 'w') as info_file: - info_file.write(json.dumps({'path':path, 'exportname':exportname})) + with open(filename, "w", encoding="utf-8") as info_file: + info_file.write(json.dumps({"path": path, "exportname": exportname})) + def _remove_persistent_connect_info(device): try: @@ -165,33 +186,48 @@ def _remove_persistent_connect_info(device): except OSError: pass + def connect_nbd(path, exportname): """Connects to a free NBD device using nbd-client and returns its path""" # We should not ask for too many nbds, as we might not have enough memory - _call(['modprobe', 'nbd', 'nbds_max=24']) + _call(["modprobe", "nbd", "nbds_max=24"]) retries = 0 while True: try: with FILE_LOCK: nbd_device = _find_unused_nbd_device() - cmd = ['nbd-client', '-unix', path, nbd_device, - '-timeout', '60', '-name', exportname] + cmd = [ + "nbd-client", + "-unix", + path, + nbd_device, + "-timeout", + "60", + "-name", + exportname, + ] _call(cmd) _wait_for_nbd_device(nbd_device=nbd_device, connected=True) _persist_connect_info(nbd_device, path, exportname) - nbd = (nbd_device[len('/dev/'):] - if nbd_device.startswith('/dev/') else nbd_device) - with open("/sys/block/" + nbd + "/queue/scheduler", "w") as fd: + nbd = ( + nbd_device[len("/dev/") :] + if nbd_device.startswith("/dev/") + else nbd_device + ) + with open("/sys/block/" + nbd + "/queue/scheduler", + "w", encoding="utf-8") as fd: fd.write("none") # Set the NBD queue size to the same as the qcow2 cluster size - with open("/sys/block/" + nbd + "/queue/max_sectors_kb", "w") as fd: + with open("/sys/block/" + nbd + "/queue/max_sectors_kb", + "w", encoding="utf-8") as fd: fd.write("512") - with open("/sys/block/" + nbd + "/queue/nr_requests", "w") as fd: + with open("/sys/block/" + nbd + "/queue/nr_requests", + "w", encoding="utf-8") as fd: fd.write("8") return nbd_device except NbdDeviceNotFound as exn: - LOGGER.warn('Failed to find free nbd device: %s', exn) + LOGGER.warning("Failed to find free nbd device: %s", exn) retries = retries + 1 if retries == 1: # We sleep for a shorter period first, in case an nbd device @@ -212,7 +248,7 @@ def disconnect_nbd_device(nbd_device): try: if _is_nbd_device_connected(nbd_device=nbd_device): _remove_persistent_connect_info(nbd_device) - cmd = ['nbd-client', '-disconnect', nbd_device] + cmd = ["nbd-client", "-disconnect", nbd_device] _call(cmd) _wait_for_nbd_device(nbd_device=nbd_device, connected=False) except NbdDeviceNotFound: @@ -220,53 +256,55 @@ def disconnect_nbd_device(nbd_device): pass - def _connect_cli(args): device = connect_nbd(path=args.path, exportname=args.exportname) - print device + print(device) def _disconnect_cli(args): disconnect_nbd_device(nbd_device=args.device) - -def _main(): +# The main function is covered by manual test and XenRT test +# Exclude it from unit test coverage +def _main(): # pragma: no cover # Configure the root logger to log into syslog # (Specifically, into /var/log/user.log) syslog_handler = 
logging.handlers.SysLogHandler( - address='/dev/log', - facility=logging.handlers.SysLogHandler.LOG_USER) + address="/dev/log", facility=logging.handlers.SysLogHandler.LOG_USER + ) # Ensure the program name is included in the log messages: - formatter = logging.Formatter('%(name)s: [%(levelname)s] %(message)s') + formatter = logging.Formatter("%(name)s: [%(levelname)s] %(message)s") syslog_handler.setFormatter(formatter) logging.getLogger().addHandler(syslog_handler) try: parser = argparse.ArgumentParser( - description="Connect to and disconnect from an NBD device") + description="Connect to and disconnect from an NBD device" + ) - subparsers = parser.add_subparsers(dest='command_name') + subparsers = parser.add_subparsers(dest="command_name") parser_connect = subparsers.add_parser( - 'connect', - help='Connect to a free NBD device and return its path') + "connect", help="Connect to a free NBD device and return its path" + ) parser_connect.add_argument( - '--path', + "--path", required=True, - help="The path of the Unix domain socket of the NBD server") + help="The path of the Unix domain socket of the NBD server", + ) parser_connect.add_argument( - '--exportname', + "--exportname", required=True, - help="The export name of the device to connect to") + help="The export name of the device to connect to", + ) parser_connect.set_defaults(func=_connect_cli) parser_disconnect = subparsers.add_parser( - 'disconnect', - help='Disconnect from the given NBD device') + "disconnect", help="Disconnect from the given NBD device" + ) parser_disconnect.add_argument( - '--device', - required=True, - help="The path of the NBD device to disconnect") + "--device", required=True, help="The path of the NBD device to disconnect" + ) parser_disconnect.set_defaults(func=_disconnect_cli) args = parser.parse_args() @@ -276,5 +314,5 @@ def _main(): raise -if __name__ == '__main__': +if __name__ == "__main__": _main() diff --git a/scripts/print-custom-templates b/python3/libexec/print-custom-templates similarity index 90% rename from scripts/print-custom-templates rename to python3/libexec/print-custom-templates index 4ae15250951..882dc068732 100755 --- a/scripts/print-custom-templates +++ b/python3/libexec/print-custom-templates @@ -20,8 +20,8 @@ def main(argv): atexit.register(logout, session) templates = session.xenapi.VM.get_all_records_where('field "is_a_template" = "true" and field "is_a_snapshot" = "false"' ) - except: - print("Error retrieving template list", file=sys.stderr) + except Exception as e: + print(type(e).__name__, "retrieving template list:", e, file=sys.stderr) sys.exit(1) output=[] diff --git a/python3/libexec/probe-device-for-file b/python3/libexec/probe-device-for-file new file mode 100755 index 00000000000..46882c2cbec --- /dev/null +++ b/python3/libexec/probe-device-for-file @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +# (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 +# Checks for the existence of a file on a device + +import os +import sys + +try: + import xenfsimage +except ImportError: + import fsimage as xenfsimage +from contextlib import contextmanager + + +# https://stackoverflow.com/a/17954769 +@contextmanager +def stderr_redirected(to=os.devnull): + ''' + import os + + with stderr_redirected(to=filename): + print("from Python") + os.system("echo non-Python applications are also supported") + ''' + fd = sys.stderr.fileno() + + ##### assert that Python and C stdio write using the same file descriptor + ####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stderr")) == fd == 1 + + def 
_redirect_stderr(to): + sys.stderr.close() # + implicit flush() + os.dup2(to.fileno(), fd) # fd writes to 'to' file + sys.stderr = os.fdopen(fd, 'w') # Python writes to fd + + with os.fdopen(os.dup(fd), 'w') as old_stderr: + with open(to, 'w') as file: + _redirect_stderr(to=file) + try: + yield # allow code to be run with the redirected stderr + finally: + _redirect_stderr(to=old_stderr) # restore stderr. + # buffering and flags such as + # CLOEXEC may be different + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: %s " % sys.argv[0]) + sys.exit(2) + device = sys.argv[1] + file = sys.argv[2] + try: + # CA-316241 - fsimage prints to stderr + with stderr_redirected(to="/dev/null"): + fs = xenfsimage.open(device, 0) + if fs.file_exists(file): + os._exit(0) + except: + pass + os._exit(1) diff --git a/scripts/restore-sr-metadata.py b/python3/libexec/restore-sr-metadata.py similarity index 59% rename from scripts/restore-sr-metadata.py rename to python3/libexec/restore-sr-metadata.py index 105591a15c5..7fa4e92aa18 100644 --- a/scripts/restore-sr-metadata.py +++ b/python3/libexec/restore-sr-metadata.py @@ -1,37 +1,40 @@ -#!/usr/bin/python +#!/usr/bin/python3 # Restore SR metadata and VDI names from an XML file # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 import atexit -import XenAPI -import os, sys, time +import contextlib import getopt -from xml.dom.minidom import parse -import codecs +import io +import sys +from xml.dom.minidom import parse # pytype: disable=pyi-error -sys.stdout = codecs.getwriter("utf-8")(sys.stdout) -sys.stderr = codecs.getwriter("utf-8")(sys.stderr) +import XenAPI + +sys.stdout = io.open(sys.stdout.fileno(), "w", encoding="utf-8") +sys.stderr = io.open(sys.stderr.fileno(), "w", encoding="utf-8") -def logout(): - try: - session.xenapi.session.logout() - except: - pass -atexit.register(logout) def usage(): - print >> sys.stderr, "%s -f -u " % sys.argv[0] - sys.exit(1) + print("%s -f -u " % sys.argv[0], file=sys.stderr) def main(argv): session = XenAPI.xapi_local() + + def logout(): + with contextlib.suppress(Exception): + session.xenapi.session.logout() + + atexit.register(logout) + session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-restore-sr-metadata") try: opts, args = getopt.getopt(argv, "hf:u:", []) - except getopt.GetoptError, err: - print str(err) + except getopt.GetoptError as err: + print(str(err)) usage() + sys.exit(1) infile = None sruuid = None @@ -43,15 +46,16 @@ def main(argv): if infile == None: usage() + sys.exit(1) try: doc = parse(infile) except: - print >> sys.stderr, "Error parsing %s" % infile + print("Error parsing %s" % infile, file=sys.stderr) sys.exit(1) if doc.documentElement.tagName != "meta": - print >> sys.stderr, "Unexpected root element while parsing %s" % infile + print("Unexpected root element while parsing %s" % infile, file=sys.stderr) sys.exit(1) for srxml in doc.documentElement.childNodes: @@ -60,19 +64,19 @@ def main(argv): name_label = srxml.getAttribute("name_label") name_descr = srxml.getAttribute("name_description") except: - print >> sys.stderr, "Error parsing SR tag" + print("Error parsing SR tag", file=sys.stderr) continue # only set attributes on the selected SR passed in on cmd line if sruuid is None or sruuid == "all" or sruuid == uuid: try: srref = session.xenapi.SR.get_by_uuid(uuid) - print "Setting SR (%s):" % uuid + print("Setting SR (%s):" % uuid) session.xenapi.SR.set_name_label(srref, name_label) - print " Name: %s " % name_label + print(" Name: %s " % name_label) 
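# --- Illustrative sketch (not part of the diff) -----------------------------
# How the backup XML consumed by restore-sr-metadata.py above can be walked
# with minidom. The "meta" and "vdi" element names come from the
# backup-sr-metadata.py writer shown earlier; the "sr" element name is an
# assumption inferred from that writer's structure.
from xml.dom.minidom import parse

def dump_backup(path):
    doc = parse(path)
    if doc.documentElement.tagName != "meta":
        raise ValueError("unexpected root element in %s" % path)
    for sr in doc.getElementsByTagName("sr"):
        print("SR %s: %s" % (sr.getAttribute("uuid"), sr.getAttribute("name_label")))
        for vdi in sr.getElementsByTagName("vdi"):
            print("  VDI %s: %s" % (vdi.getAttribute("uuid"),
                                    vdi.getAttribute("name_label")))
# -----------------------------------------------------------------------------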
session.xenapi.SR.set_name_description(srref, name_descr) - print " Description: %s" % name_descr + print(" Description: %s" % name_descr) except: - print >> sys.stderr, "Error setting SR data for: %s (%s)" % (uuid, name_label) + print("Error setting SR data for: %s (%s)" % (uuid, name_label), file=sys.stderr) sys.exit(1) # go through all the SR VDIs and set the name_label and description for vdixml in srxml.childNodes: @@ -81,20 +85,22 @@ def main(argv): vdi_label = vdixml.getAttribute("name_label") vdi_descr = vdixml.getAttribute("name_description") except: - print >> sys.stderr, "Error parsing VDI tag" + print("Error parsing VDI tag", file=sys.stderr) continue try: vdiref = session.xenapi.VDI.get_by_uuid(vdi_uuid) - print "Setting VDI (%s):" % vdi_uuid + print("Setting VDI (%s):" % vdi_uuid) session.xenapi.VDI.set_name_label(vdiref, vdi_label) - print " Name: %s" % vdi_label + print(" Name: %s" % vdi_label) session.xenapi.VDI.set_name_description(vdiref, vdi_descr) - print " Description: %s" % vdi_descr + print(" Description: %s" % vdi_descr) except: - print >> sys.stderr, "Error setting VDI data for: %s (%s)" % (vdi_uuid, name_label) + print( + "Error setting VDI data for: %s (%s)" % (vdi_uuid, name_label), + file=sys.stderr, + ) continue + if __name__ == "__main__": main(sys.argv[1:]) - - diff --git a/scripts/usb_reset.py b/python3/libexec/usb_reset.py similarity index 98% rename from scripts/usb_reset.py rename to python3/libexec/usb_reset.py index 82a690bcea0..573936ae1c3 100755 --- a/scripts/usb_reset.py +++ b/python3/libexec/usb_reset.py @@ -47,7 +47,7 @@ import errno import fcntl import grp -import xcp.logger as log +import xcp.logger as log # pytype: disable=import-error import logging import os import pwd @@ -132,7 +132,7 @@ def load_device_ids(device): # ignore and continue log.warning("Failed to remove device ids: {}".format(str(e))) - return uid, gid + return uid, gid # pyright: ignore[reportPossiblyUnboundVariable] # pragma: no cover # throw IOError, ValueError diff --git a/scripts/usb_scan.py b/python3/libexec/usb_scan.py similarity index 77% rename from scripts/usb_scan.py rename to python3/libexec/usb_scan.py index 25290b362a9..03d89f7baed 100755 --- a/scripts/usb_scan.py +++ b/python3/libexec/usb_scan.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright (C) Citrix Systems Inc. # @@ -21,16 +21,19 @@ # 2. check if device can be passed through based on policy file # 3. 
return the device info to XAPI in json format -from __future__ import print_function +# pylint: disable=redefined-outer-name +# pyright: reportPossiblyUnboundVariable=false, reportAttributeAccessIssue=false + import abc import argparse import json -import xcp.logger as log import logging -import pyudev import re import sys +import pyudev +import xcp.logger as log # pytype: disable=import-error + def log_list(l): for s in l: @@ -43,7 +46,7 @@ def log_exit(m): def hex_equal(h1, h2): - """ check if the value of hex string are equal + """check if the value of hex string are equal :param h1:(str) lhs hex string :param h2:(str) rhs hex string @@ -55,21 +58,22 @@ def hex_equal(h1, h2): return False -class UsbObject(dict): - """ Base class of USB classes, save USB properties in dict +class UsbObject(dict): # pytype: disable=ignored-metaclass + """Base class of USB classes, save USB properties in dict node(str): the key, device node """ + __metaclass__ = abc.ABCMeta def __init__(self, node): - super(UsbObject, self).__init__() + super().__init__() self.node = node def get_node(self): return self.node - def __hash__(self): + def __hash__(self): # pyright:ignore[reportIncompatibleVariableOverride] return hash(self.node) def __eq__(self, other): @@ -90,11 +94,12 @@ def debug_str(self, level=0): :param level: the indent level :return: the debug string """ - return self.indent(level) + self.__class__.__name__ + ": " + \ - str((self.node, self)) + return ( + self.indent(level) + self.__class__.__name__ + ": " + str((self.node, self)) + ) def is_initialized(self): - """ check if all properties are properly set + """check if all properties are properly set :return: bool, if properties are ready """ @@ -106,25 +111,23 @@ def _is_class_hub(self, key_class): return cls is not None and hex_equal(__VALUE_CLASS_HUB, cls) @abc.abstractmethod - def is_class_hub(self): - """ check if this belongs to a hub + def is_class_hub(self) -> bool: + """check if this belongs to a hub :return: bool, if this belongs to a hub """ - pass @abc.abstractmethod - def is_child_of(self, parent): - """ check if this is a child of parent + def is_child_of(self, parent) -> bool: + """check if this is a child of parent :param parent:(UsbObject) the parent to check against :return: """ - pass @staticmethod def validate_int(s, base=10): - """ validate if a string can be converted to int + """validate if a string can be converted to int :param s:(str) the string to be converted :param base:(int) the radix base of integer to convect @@ -138,10 +141,11 @@ def validate_int(s, base=10): class UsbDevice(UsbObject): - """ Class for USB device, save USB properties in UsbObject dict + """Class for USB device, save USB properties in UsbObject dict interfaces:([UsbInterface]) list of USB interfaces belonging to this device """ + _DESC_VENDOR = "ID_VENDOR_FROM_DATABASE" _DESC_PRODUCT = "ID_MODEL_FROM_DATABASE" @@ -156,13 +160,22 @@ class UsbDevice(UsbObject): _USB_SPEED = "speed" _PRODUCT_DESC = [_DESC_VENDOR, _DESC_PRODUCT] - _PRODUCT_DETAILS = [_VERSION, _ID_VENDOR, _ID_PRODUCT, _BCD_DEVICE, _SERIAL, - _CLASS, _CONF_VALUE, _NUM_INTERFACES, _USB_SPEED] + _PRODUCT_DETAILS = [ + _VERSION, + _ID_VENDOR, + _ID_PRODUCT, + _BCD_DEVICE, + _SERIAL, + _CLASS, + _CONF_VALUE, + _NUM_INTERFACES, + _USB_SPEED, + ] _PROPS = _PRODUCT_DESC + _PRODUCT_DETAILS _PROPS_NONABLE = _PRODUCT_DESC + [_SERIAL] def __init__(self, node, props1, props2): - """ initialise UsbDevice, set node and properties + """initialise UsbDevice, set node and properties :param node(str): device 
node :param props1(pyudev.Device): device, to get properties from UDEV @@ -170,14 +183,14 @@ def __init__(self, node, props1, props2): :param props2(pyudev.Device.attributes): device attributes, to get properties from sysfs """ - super(UsbDevice, self).__init__(node) + super().__init__(node) for p in self._PRODUCT_DESC: if props1.get(p) is not None: self[p] = props1.get(p) for p in self._PRODUCT_DETAILS: if props2.get(p) is not None: - self[p] = props2.get(p) + self[p] = props2.get(p).decode() for p in self._PROPS_NONABLE: if p not in self: self[p] = "" @@ -185,7 +198,7 @@ def __init__(self, node, props1, props2): self.interfaces = set() def debug_str(self, level=0): - s = super(UsbDevice, self).debug_str(level) + s = super().debug_str(level) for i in self.interfaces: s += i.debug_str(level + 1) return s @@ -203,7 +216,7 @@ def is_initialized(self): if not self.validate_int(self[p]): return False - return super(UsbDevice, self).is_initialized() + return super().is_initialized() def is_class_hub(self): return self._is_class_hub(self._CLASS) @@ -213,13 +226,13 @@ def is_child_of(self, parent): return False def add_interface(self, interface): - """ add an interface to this device + """add an interface to this device :param interface:(UsbInterface) the UsbInterface to add :return: None """ if interface in self.interfaces: - log.debug("overriding existing interface: " + interface) + log.debug("overriding existing interface: " + str(interface)) self.interfaces.remove(interface) self.interfaces.add(interface) @@ -230,18 +243,18 @@ def del_interface(self, interface): :return: None """ if interface in self.interfaces: - log.debug("removing interface: " + interface) + log.debug("removing interface: " + str(interface)) self.interfaces.remove(interface) def get_all_interfaces(self): - """ get all interfaces attached of this device + """get all interfaces attached of this device :return: set of all interfaces """ return self.interfaces def is_ready(self): - """ check if this device has all the interfaces attached + """check if this device has all the interfaces attached :return: bool, if it's ready to do policy check now """ @@ -250,9 +263,8 @@ def is_ready(self): class UsbInterface(UsbObject): - """ Class for USB interface, save USB properties in UsbObject dict + """Class for USB interface, save USB properties in UsbObject dict""" - """ _NUMBER = "bInterfaceNumber" _CLASS = "bInterfaceClass" _SUB_CLASS = "bInterfaceSubClass" @@ -261,20 +273,20 @@ class UsbInterface(UsbObject): _PROPS = [_NUMBER, _CLASS, _SUB_CLASS, _PROTOCOL] def __init__(self, node, props): - """ initialise UsbInterface, set node and properties + """initialise UsbInterface, set node and properties :param node(str): device node :param props(pyudev.Device.attributes): device attributes, to get properties from sysfs """ - super(UsbInterface, self).__init__(node) + super().__init__(node) for p in self._PROPS: if props.get(p) is not None: - self[p] = props.get(p) + self[p] = props.get(p).decode() + # pylint: disable-next=useless-parent-delegation # This parent call is superfluous def debug_str(self, level=0): - s = super(UsbInterface, self).debug_str(level) - return s + return super().debug_str(level) def is_class_hub(self): return self._is_class_hub(self._CLASS) @@ -287,13 +299,14 @@ def is_initialized(self): for p in self._PROPS: if p not in self or not self.validate_int(self[p], 16): return False - return super(UsbInterface, self).is_initialized() + return super().is_initialized() def is_child_of(self, parent): if isinstance(parent, 
UsbDevice) and parent.is_initialized(): conf_value = parent[UsbDevice._CONF_VALUE] - pattern = r"^{}:{}\.\d+$".format(re.escape(parent.get_node()), - re.escape(conf_value)) + pattern = r"^{}:{}\.\d+$".format( + re.escape(parent.get_node()), re.escape(conf_value) + ) return re.match(pattern, self.get_node()) is not None return False @@ -318,14 +331,15 @@ def get_usb_info(): return devices, interfaces -class Policy(object): - """ Parse policy file, and check if a UsbDevice can be passed through +class Policy: + """Parse policy file, and check if a UsbDevice can be passed through Policy file spec reference: https://support.citrix.com/article/CTX119722 rule_list: the list of parsed rule """ + _PATH = "/etc/xensource/usb-policy.conf" _CLASS = "class" @@ -336,36 +350,40 @@ class Policy(object): _BCD_DEVICE = "rel" # key in policy <--> key in usb device - _KEY_MAP_DEVICE = {_ID_VENDOR: UsbDevice._ID_VENDOR, - _ID_PRODUCT: UsbDevice._ID_PRODUCT, - _BCD_DEVICE: UsbDevice._BCD_DEVICE} + _KEY_MAP_DEVICE = { + _ID_VENDOR: UsbDevice._ID_VENDOR, # pylint: disable=protected-access + _ID_PRODUCT: UsbDevice._ID_PRODUCT, # pylint: disable=protected-access + _BCD_DEVICE: UsbDevice._BCD_DEVICE, # pylint: disable=protected-access + } # key in policy <--> key in usb interface - _KEY_MAP_INTERFACE = {_CLASS: UsbInterface._CLASS, - _SUBCLASS: UsbInterface._SUB_CLASS, - _PROTOCOL: UsbInterface._PROTOCOL} - - _PAT_KEY = r"\s*({}|{}|{}|{}|{}|{})\s*".format(_CLASS, _SUBCLASS, - _PROTOCOL, _ID_VENDOR, - _ID_PRODUCT, _BCD_DEVICE) + _KEY_MAP_INTERFACE = { + _CLASS: UsbInterface._CLASS, # pylint: disable=protected-access + _SUBCLASS: UsbInterface._SUB_CLASS, # pylint: disable=protected-access + _PROTOCOL: UsbInterface._PROTOCOL, # pylint: disable=protected-access + } + + _PAT_KEY = r"\s*({}|{}|{}|{}|{}|{})\s*".format( + _CLASS, _SUBCLASS, _PROTOCOL, _ID_VENDOR, _ID_PRODUCT, _BCD_DEVICE + ) _PATTERN = r"{}=\s*([0-9a-f]+)".format(_PAT_KEY) _ALLOW = "allow" def __init__(self): - """ parse policy file, generate rule list + """parse policy file, generate rule list Note: hubs are never allowed to pass through """ self.rule_list = [] try: - with open(self._PATH, "r") as f: + with open(self._PATH, encoding="utf-8", errors="backslashreplace") as f: log.debug("=== policy file begin") for line in f: log.debug(line[0:-1]) self.parse_line(line) log.debug("=== policy file end") - except IOError as e: + except OSError as e: # without policy file, no device will be allowed to passed through log_exit("Caught error {}, policy file error".format(str(e))) @@ -375,19 +393,21 @@ def __init__(self): def check_hex_length(self, name, value): if name in [self._CLASS, self._SUBCLASS, self._PROTOCOL]: - return 2 == len(value) + return len(value) == 2 if name in [self._ID_VENDOR, self._ID_PRODUCT, self._BCD_DEVICE]: - return 4 == len(value) + return len(value) == 4 return False @staticmethod def parse_error(pos, end, target, line): log_exit( - "Malformed policy rule, unable to parse '{}', malformed line: {}" - .format(target[pos:end], line)) + "Malformed policy rule, unable to parse '{}', malformed line: {}".format( + target[pos:end], line + ) + ) def parse_line(self, line): - """ parse one line of policy file, generate rule, and append it to + """parse one line of policy file, generate rule, and append it to self.rule_list Example: @@ -413,13 +433,10 @@ def parse_line(self, line): # 2. 
split action and match field # ^\s*(ALLOW|DENY)\s*:\s*([^:]*)$ try: - action, target = [part.strip() for part in line.split(":")] + action, target = (part.strip() for part in line.split(":")) except ValueError as e: if line.rstrip(): - log_exit("Caught error {}, malformed line: {}" - .format(str(e), line)) - # empty line, just return - return + log_exit("Caught error {}, malformed line: {}".format(str(e), line)) # 3. parse action # \s*(ALLOW|DENY)\s* @@ -429,37 +446,39 @@ def parse_line(self, line): elif action.lower() == "deny": rule[self._ALLOW] = False else: - log_exit("Malformed action'{}', malformed line: {}".format( - action, line)) + log_exit("Malformed action'{}', malformed line: {}".format(action, line)) # 4. parse key=value pairs # pattern = r"\s*(class|subclass|prot|vid|pid|rel)\s*=\s*([0-9a-f]+)" last_end = 0 - for matchNum, match in enumerate(re.finditer(self._PATTERN, target, - re.IGNORECASE)): - if last_end != match.start(): - self.parse_error(last_end, match.start(), target, line) + name = "" + value = "" + for m in re.finditer(self._PATTERN, target, re.IGNORECASE): + if last_end != m.start(): + self.parse_error(last_end, m.start(), target, line) try: - name, value = [part.lower() for part in match.groups()] + name, value = (part.lower() for part in m.groups()) # This can happen if `part` is None except AttributeError: - self.parse_error(match.start(), match.end(), target, line) + self.parse_error(m.start(), m.end(), target, line) # This should never happen, because the regexp has exactly two # matching groups except ValueError: - self.parse_error(match.start(), match.end(), target, line) + self.parse_error(m.start(), m.end(), target, line) if not self.check_hex_length(name, value): - log_exit("hex'{}' length error, malformed line {}".format( - str(value), line)) + log_exit( + "hex'{}' length error, malformed line {}".format(str(value), line) + ) if name in rule: - log_exit("duplicated tag'{}' found, malformed line {}". 
- format(name, line)) + log_exit( + "duplicated tag'{}' found, malformed line {}".format(name, line) + ) rule[name] = value - last_end = match.end() + last_end = m.end() if last_end != len(target): self.parse_error(last_end, len(target) + 1, target, line) @@ -477,14 +496,20 @@ def match_device_interface(self, rule, device, interface): :return:(bool) if they match """ for k in [k for k in rule if k in self._KEY_MAP_DEVICE]: - log.debug("check {} props[{}] against {}".format( - interface.get_node(), k, str(rule))) + log.debug( + "check {} props[{}] against {}".format( + interface.get_node(), k, str(rule) + ) + ) if not hex_equal(rule[k], device[self._KEY_MAP_DEVICE[k]]): return False for k in [k for k in rule if k in self._KEY_MAP_INTERFACE]: - log.debug("check {} props[{}] against {}".format( - interface.get_node(), k, str(rule))) + log.debug( + "check {} props[{}] against {}".format( + interface.get_node(), k, str(rule) + ) + ) if not hex_equal(rule[k], interface[self._KEY_MAP_INTERFACE[k]]): return False @@ -549,16 +574,19 @@ def check(self, device): def parse_args(): - parser = argparse.ArgumentParser( - description="scanner to get USB devices info") - parser.add_argument("-d", "--diagnostic", dest="diagnostic", - action="store_true", - help="enable diagnostic mode") + parser = argparse.ArgumentParser(description="scanner to get USB devices info") + parser.add_argument( + "-d", + "--diagnostic", + dest="diagnostic", + action="store_true", + help="enable diagnostic mode", + ) return parser.parse_args() def to_pusb(device): - """ convert UsbDevice to pusb dict + """convert UsbDevice to pusb dict Example pusb dict: [ @@ -612,7 +640,7 @@ def to_pusb(device): def make_pusbs_list(devices, interfaces): - """ check the USB devices and interfaces against policy file, + """check the USB devices and interfaces against policy file, and return the pusb list that can be passed through :param devices:([UsbDevice]) USB device list we found in host @@ -633,7 +661,7 @@ def make_pusbs_list(devices, interfaces): return [to_pusb(d) for d in devices if d.is_ready() and policy.check(d)] -if __name__ == "__main__": +if __name__ == "__main__": # pragma: no cover args = parse_args() if args.diagnostic: log.logToSyslog(level=logging.DEBUG) @@ -643,8 +671,8 @@ def make_pusbs_list(devices, interfaces): # get usb info try: devices, interfaces = get_usb_info() - except Exception as e: - log_exit("Failed to get usb info: {}".format(str(e))) + except Exception as ex: + log_exit("Failed to get usb info: {}".format(str(ex))) # debug info log_list(devices) diff --git a/python3/packages/inventory.py b/python3/packages/inventory.py new file mode 100644 index 00000000000..87847cf5cde --- /dev/null +++ b/python3/packages/inventory.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +""" +inventory.py + +This module defines functions to read and parse constants from the xensource-inventory file. +""" +import sys + +INVENTORY = "@INVENTORY@" +INSTALLATION_UUID = "INSTALLATION_UUID" + + +def read_kvpairs(filename): + """Read in a file of key-value pairs in the format used by the inventory file""" + all_entries = {} + with open(filename, 'r', encoding='utf-8') as f: + for line in f: + equals = line.index("=") + key = line[:equals] + value = line[equals+1:].strip().strip("'") + all_entries[key] = value + return all_entries + + +def parse(): + """Return the contents of the xensource inventory file as a dictionary""" + try: + return read_kvpairs(INVENTORY) + except FileNotFoundError as e: + print("Error: File '{}' not found. 
{}".format(INVENTORY, e), file=sys.stderr) + return {} + + +def get_localhost_uuid(): + """Return the UUID of the local host""" + return parse()[INSTALLATION_UUID] diff --git a/python3/packages/observer.py b/python3/packages/observer.py index 4f2966e5a9a..65bb6d6aeeb 100644 --- a/python3/packages/observer.py +++ b/python3/packages/observer.py @@ -44,16 +44,16 @@ def current_otel_time(): from logging.handlers import SysLogHandler from typing import List, Sequence -# The opentelemetry library may generate exceptions we aren't expecting, this code +# The OpenTelemetry library may generate exceptions we aren't expecting: This code # must not fail or it will cause the pass-through script to fail when at worst # this script should be a noop. As such, we sometimes need to catch broad exceptions: # pylint: disable=broad-exception-caught, too-many-locals, too-many-statements # wrapt.decorator adds the extra parameters so we shouldn't provide them: # pylint: disable=no-value-for-parameter -# We only want to import opentelemetry libraries if instrumentation is enabled +# We only want to import OpenTelemetry libraries when instrumentation is enabled # pylint: disable=import-outside-toplevel -DEBUG_ENABLED = False +DEBUG_ENABLED = os.getenv("XAPI_TEST") DEFAULT_MODULES = "LVHDSR,XenAPI,SR,SRCommand,util" FORMAT = "observer.py: %(message)s" handler = SysLogHandler(facility="local5", address="/dev/log") @@ -114,7 +114,7 @@ def _init_tracing(configs: List[str], config_dir: str): If configs is empty, return the noop span and patch_module functions. If configs are passed: - - Import the opentelemetry packages + - Import the OpenTelemetry packages - Read the configuration file - Create a tracer - Trace the script diff --git a/scripts/plugins/perfmon b/python3/perfmon/perfmon similarity index 95% rename from scripts/plugins/perfmon rename to python3/perfmon/perfmon index e3dc2452691..9f26f998fd4 100644 --- a/scripts/plugins/perfmon +++ b/python3/perfmon/perfmon @@ -2,11 +2,11 @@ # # A plugin for requesting perfmon actions via the xe host-call-plugin mechanism -import XenAPIPlugin import os import socket +import XenAPIPlugin -# TODO: put this info plus all the supported cmds in a shared file +# TODO: Document this information and all supported commands cmdsockname = "\0perfmon" # an af_unix socket name (the "\0" stops socket.bind() creating a fs node) cmdmaxlen = 256 diff --git a/scripts/perfmon.service b/python3/perfmon/perfmon.service similarity index 100% rename from scripts/perfmon.service rename to python3/perfmon/perfmon.service diff --git a/scripts/sysconfig-perfmon b/python3/perfmon/sysconfig-perfmon similarity index 100% rename from scripts/sysconfig-perfmon rename to python3/perfmon/sysconfig-perfmon diff --git a/scripts/plugins/disk-space b/python3/plugins/disk-space similarity index 100% rename from scripts/plugins/disk-space rename to python3/plugins/disk-space diff --git a/scripts/examples/python/echo.py b/python3/plugins/echo.py similarity index 74% rename from scripts/examples/python/echo.py rename to python3/plugins/echo.py index 57f70492c6c..27020e17065 100644 --- a/scripts/examples/python/echo.py +++ b/python3/plugins/echo.py @@ -5,8 +5,8 @@ import XenAPIPlugin - -def main(session, args): +# The 1st argument is the session. 
This plugin does not use it, hence use _: +def main(_, args): if "sleep" in args: secs = int(args["sleep"]) time.sleep(secs) diff --git a/scripts/plugins/extauth-hook b/python3/plugins/extauth-hook similarity index 100% rename from scripts/plugins/extauth-hook rename to python3/plugins/extauth-hook diff --git a/scripts/plugins/extauth-hook-AD.py b/python3/plugins/extauth-hook-AD.py similarity index 82% rename from scripts/plugins/extauth-hook-AD.py rename to python3/plugins/extauth-hook-AD.py index 98b228c04e5..d3e89aae8c8 100755 --- a/scripts/plugins/extauth-hook-AD.py +++ b/python3/plugins/extauth-hook-AD.py @@ -2,6 +2,12 @@ # # extauth-hook-AD.py # +# This plugin manages the following configuration files for external authentication +# - /etc/nsswitch.conf +# - /etc/pam.d/sshd +# - /etc/pam.d/hcp_users +# - /etc/ssh/ssh_config +# # This module can be called directly as a plugin. It handles # Active Directory being enabled or disabled as the hosts external_auth_type, # or subjects being added or removed while AD is the external_auth_type, @@ -10,7 +16,6 @@ # Alternatively, the extauth-hook module can be called, which will # dispatch to the correct extauth-hook-.py module automatically. import abc -import sys import subprocess import os import shutil @@ -18,18 +23,11 @@ import logging import logging.handlers from collections import OrderedDict -from enum import Enum -import XenAPIPlugin -import XenAPI +import XenAPIPlugin -# this plugin manage following configuration files for external auth -# - /etc/nsswitch.conf -# - /etc/pam.d/sshd -# - /etc/pam.d/hcp_users -# - /etc/ssh/ssh_config -# pylint: disable=super-with-arguments +# pylint: disable=too-few-public-methods HCP_USERS = "/etc/security/hcp_ad_users.conf" @@ -45,7 +43,7 @@ def setup_logger(): log = logging.getLogger() if not os.path.exists(addr): - log.warning("{} not available, logs are not redirected".format(addr)) + log.warning("%s not available, logs are not redirected", addr) return # Send to syslog local5, which will be redirected to xapi log /var/log/xensource.log @@ -62,55 +60,34 @@ def setup_logger(): logger = logging.getLogger(__name__) -def run_cmd(cmd, log_cmd=True): - """Helper function to run command""" +def run_cmd(command: "list[str]"): + """Helper function to run a command and log the output""" try: - result = subprocess.check_output(cmd) - if log_cmd: - msg = "{} -> {}".format(cmd, result) - logger.debug(msg) - return result.strip() - except Exception: # pylint: disable=broad-except - logger.exception("Failed to run command %s", cmd) - return None - + output = subprocess.check_output(command, universal_newlines=True) + logger.debug("%s -> %s", command, output.strip()) -class ADBackend(Enum): - """Enum for AD backend""" - BD_PBIS = 0 - BD_WINBIND = 1 + except OSError: + logger.exception("Failed to run command %s", command) -# pylint: disable=useless-object-inheritance, too-few-public-methods -class ADConfig(object): +class ADConfig(abc.ABC): """Base class for AD configuration""" - #pylint: disable=too-many-arguments def __init__(self, path, session, args, ad_enabled=True, load_existing=True, file_mode=0o644): self._file_path = path self._session = session self._args = args self._lines = [] - self._backend = self._get_ad_backend() self._ad_enabled = ad_enabled self._file_mode = file_mode if load_existing and os.path.exists(self._file_path): - with open(self._file_path, 'r') as file: + with open(self._file_path, "r", encoding="utf-8") as file: lines = file.readlines() self._lines = [l.strip() for l in lines] - def 
_get_ad_backend(self): - """Get active AD backend""" - if self._args.get("ad_backend", "winbind") == "pbis": - logger.debug("pbis is used as AD backend") - return ADBackend.BD_PBIS - - logger.debug("winbind is used as AD backend") - return ADBackend.BD_WINBIND @abc.abstractmethod - def _apply_to_cache(self): - pass + def _apply_to_cache(self): ... def apply(self): """Apply configuration""" @@ -164,11 +141,7 @@ def __init__(self, session, args, ad_enabled=True): def _apply_to_cache(self): if self._ad_enabled: - if self._backend == ADBackend.BD_PBIS: - ad_pam_module = "/lib/security/pam_lsass.so" - else: - ad_pam_module = "pam_winbind.so" - content = self.ad_pam_format.format(ad_module=ad_pam_module, + content = self.ad_pam_format.format(ad_module="pam_winbind.so", user_list=HCP_USERS, group_list=HCP_GROUPS) else: content = self.no_ad_pam @@ -176,7 +149,6 @@ def _apply_to_cache(self): class DynamicPam(ADConfig): - #pylint: disable=too-few-public-methods """Base class to manage AD users and groups configure which permit pool admin ssh""" def __init__(self, path, session, args, ad_enabled=True): @@ -211,16 +183,6 @@ def _is_pool_admin(self, subject_rec): logger.warning("subject %s does not have role", subject_rec) return False - def _format_item(self, item): - space_replacement = "+" - if self._backend == ADBackend.BD_PBIS: - if space_replacement in item: - raise ValueError( - "{} is not permitted in subject name".format(space_replacement)) - # PBIS relace space with "+", eg "ab cd" -> "ab++cd" - # PBIS pam module will reverse it back - return item.replace(" ", space_replacement) - return item def _is_responsible_for(self, subject_rec): try: @@ -230,23 +192,19 @@ def _is_responsible_for(self, subject_rec): return False @abc.abstractmethod - def _match_subject(self, subject_rec): - pass + def _match_subject(self, subject_rec): ... @abc.abstractmethod - def _add_subject(self, subject_rec): - pass + def _add_subject(self, subject_rec): ... 
def _install(self): if self._ad_enabled: super(DynamicPam, self)._install() - else: - if os.path.exists(self._file_path): - os.remove(self._file_path) + elif os.path.exists(self._file_path): + os.remove(self._file_path) class UsersList(DynamicPam): - #pylint: disable=too-few-public-methods """Class manage users which permit pool admin ssh""" def __init__(self, session, arg, ad_enabled=True): @@ -257,13 +215,11 @@ def _match_subject(self, subject_rec): def _add_upn(self, subject_rec): sep = "@" + upn = "" try: upn = subject_rec["other_config"]["subject-upn"] user, domain = upn.split(sep) - if self._backend == ADBackend.BD_PBIS: - # PBIS convert domain to UPPER case, we revert it back - domain = domain.lower() - self._lines.append(u"{}{}{}".format(user, sep, domain)) + self._lines.append("{}{}{}".format(user, sep, domain)) except KeyError: logger.info("subject does not have upn %s", subject_rec) except ValueError: @@ -272,15 +228,12 @@ def _add_upn(self, subject_rec): def _add_subject(self, subject_rec): try: sid = subject_rec['subject_identifier'] - name = subject_rec["other_config"]["subject-name"] - formatted_name = self._format_item(name) + formatted_name = subject_rec["other_config"]["subject-name"] logger.debug("Permit user %s, Current sid is %s", formatted_name, sid) self._lines.append(formatted_name) - # If ssh key is permittd in authorized_keys, + # If the ssh key is permitted in the authorized_keys file, # The original name is compared, add UPN and original name - if self._backend == ADBackend.BD_PBIS and name != formatted_name: - self._lines.append(name) self._add_upn(subject_rec) # pylint: disable=broad-except except Exception as exp: @@ -288,7 +241,6 @@ def _add_subject(self, subject_rec): class GroupsList(DynamicPam): - #pylint: disable=too-few-public-methods """Class manage groups which permit pool admin ssh""" def __init__(self, session, arg, ad_enabled=True): @@ -300,8 +252,7 @@ def _match_subject(self, subject_rec): def _add_subject(self, subject_rec): try: sid = subject_rec['subject_identifier'] - name = self._format_item( - subject_rec["other_config"]["subject-name"]) + name = subject_rec["other_config"]["subject-name"] logger.debug("Permit group %s, Current sid is %s", name, sid) self._lines.append(name) # pylint: disable=broad-except @@ -311,7 +262,7 @@ def _add_subject(self, subject_rec): class KeyValueConfig(ADConfig): """ - Only support configure files with key value in each line, seperated by sep + Only support configure files with key value in each line, separated by sep Otherwise, it will be just copied and un-configurable If multiple lines with the same key exists, only the first line will be configured """ @@ -319,7 +270,6 @@ class KeyValueConfig(ADConfig): _special_line_prefix = "__key_value_config_sp_line_prefix_" _empty_value = "" - #pylint: disable=too-many-arguments def __init__(self, path, session, args, ad_enabled=True, load_existing=True, file_mode=0o644, sep=": ", comment="#"): super(KeyValueConfig, self).__init__(path, session, @@ -364,7 +314,7 @@ def _apply_value(self, key, value): if self._is_special_line(key): line = value else: # normal line, construct the key value pair - sep = self._sep if self._sep else " " + sep = self._sep or " " line = "{}{}{}".format(key, sep, value) self._lines.append(line) @@ -382,10 +332,7 @@ def __init__(self, session, args, ad_enabled=True): "/etc/nsswitch.conf", session, args, ad_enabled) modules = "files sss" if ad_enabled: - if self._backend == ADBackend.BD_PBIS: - modules = "files sss lsass" - else: - modules = 
"files hcp winbind" + modules = "files hcp winbind" self._update_key_value("passwd", modules) self._update_key_value("group", modules) self._update_key_value("shadow", modules) @@ -475,7 +422,7 @@ def after_extauth_enable(session, args): def after_xapi_initialize(session, args): - """Callback afer xapi initialize""" + """Callback after xapi initialization""" return refresh_all_configurations(session, args, "after_xapi_initialize") @@ -485,7 +432,7 @@ def after_subject_add(session, args): def after_subject_remove(session, args): - """Callbackk after remove subject""" + """Callback after remove subject""" return refresh_dynamic_pam(session, args, "after_subject_remove") diff --git a/scripts/plugins/install-supp-pack b/python3/plugins/install-supp-pack similarity index 90% rename from scripts/plugins/install-supp-pack rename to python3/plugins/install-supp-pack index 8143215c4b2..83db0303186 100755 --- a/scripts/plugins/install-supp-pack +++ b/python3/plugins/install-supp-pack @@ -36,8 +36,8 @@ def install(session, args): vdi_ref = None try: vdi_ref = session.xenapi.VDI.get_by_uuid(vdi) - except: - raise ArgumentError("VDI parameter invalid") + except Exception as exc: + raise ArgumentError("VDI parameter invalid") from exc inventory = xcp.environ.readInventory() this_host_uuid = inventory["INSTALLATION_UUID"] @@ -46,8 +46,8 @@ def install(session, args): update_ref = None try: update_ref = session.xenapi.pool_update.introduce(vdi_ref) - except: - raise ArgumentError("VDI contains invalid update package") + except Exception as exc: + raise ArgumentError("VDI contains invalid update package") from exc try: session.xenapi.pool_update.apply(update_ref, this_host_ref) @@ -57,9 +57,9 @@ def install(session, args): # "['ERRORCODE', 'error_message']" # fetch the error_message and display it. 
error = json.loads(str(e))[1].encode("utf8") - except: + except Exception: error = str(e) - raise InstallFailure("Failed to install the supplemental pack", error) + raise InstallFailure("Failed to install the supplemental pack", error) from e return "OK" diff --git a/scripts/plugins/openvswitch-config-update b/python3/plugins/openvswitch-config-update similarity index 100% rename from scripts/plugins/openvswitch-config-update rename to python3/plugins/openvswitch-config-update diff --git a/scripts/plugins/test_extauth_hook_AD.py b/python3/plugins/test_extauth_hook_AD.py similarity index 68% rename from scripts/plugins/test_extauth_hook_AD.py rename to python3/plugins/test_extauth_hook_AD.py index 71b5b7c95eb..eb9d1107e87 100644 --- a/scripts/plugins/test_extauth_hook_AD.py +++ b/python3/plugins/test_extauth_hook_AD.py @@ -1,22 +1,44 @@ """ Test module for extauth_hook_ad """ -#pylint: disable=invalid-name -import sys + +import logging import os from unittest import TestCase -from mock import MagicMock, patch -# mock modules to avoid dependencies -sys.modules["XenAPIPlugin"] = MagicMock() -sys.modules["XenAPI"] = MagicMock() -# pylint: disable=wrong-import-position -# Import must after mock modules -from extauth_hook_ad import StaticSSHPam, NssConfig, SshdConfig, UsersList, GroupsList +from unittest.mock import MagicMock, patch + +from python3.tests.import_helper import import_file_as_module, mocked_modules + + +with mocked_modules("XenAPIPlugin", "XenAPI"): + testee = import_file_as_module("python3/plugins/extauth-hook-AD.py") + # Will be replaced by updating the tests to call testee.function_name() + run_cmd = testee.run_cmd + NssConfig = testee.NssConfig + UsersList = testee.UsersList + GroupsList = testee.GroupsList + SshdConfig = testee.SshdConfig + StaticSSHPam = testee.StaticSSHPam + + +def test_run_cmd(caplog): + """Assert the current buggy behavior of the run_cmd function after py3 migration""" + cmd = ["echo", " Hello World! "] + # Call the function under test, check the return value and capture the log message + with caplog.at_level(logging.DEBUG): + assert run_cmd(cmd) is None # The return value is None (not used in the code) + + # Assert the log message + assert caplog.records[0].message == "%s -> Hello World!" 
% (cmd) + + # Test the case where the command fails: + assert run_cmd(["bad command"]) is None + assert caplog.records[1].message == "Failed to run command ['bad command']" def line_exists_in_config(lines, line): """ - Helper function to detect whether configration match expectation + Helper function to check if the configuration matches the expectation """ return any(line.split() == l.split() for l in lines) @@ -24,8 +46,6 @@ def line_exists_in_config(lines, line): domain = "conappada.local" args_bd_winbind = {'auth_type': 'AD', 'service_name': domain, 'ad_backend': 'winbind'} -args_bd_pbis = {'auth_type': 'AD', - 'service_name': domain, 'ad_backend': 'pbis'} mock_session = MagicMock() subjects = ['OpaqueRef:96ae4be5-8815-4de8-a40f-d5e5c531dda9'] @@ -34,8 +54,7 @@ def line_exists_in_config(lines, line): admin_roles = [admin_role] mock_session.xenapi.role.get_by_name_label.return_value = admin_roles -# pylint: disable=unused-argument, protected-access, redefined-outer-name, missing-function-docstring -# pylint: disable=too-many-arguments, missing-class-docstring, no-self-use +# pylint: disable=unused-argument, redefined-outer-name def build_user(domain_netbios, domain, name, is_admin=True): @@ -98,18 +117,10 @@ def test_ad_enabled_with_winbind(self, mock_rename, mock_chmod): enabled_keyward = "auth sufficient pam_winbind.so try_first_pass try_authtok" self.assertTrue(line_exists_in_config(static._lines, enabled_keyward)) - def test_ad_enabled_with_pbis(self, mock_rename, mock_chmod): - # pam_lsass should be used - mock_rename.side_effect = mock_rename_to_clean - static = StaticSSHPam(mock_session, args_bd_pbis) - static.apply() - enabled_keyward = "auth sufficient /lib/security/pam_lsass.so try_first_pass try_authtok" - self.assertTrue(line_exists_in_config(static._lines, enabled_keyward)) - -@patch("extauth_hook_ad.ADConfig._install") +@patch("extauth_hook_AD.ADConfig._install") class TestUsersList(TestCase): - @patch("extauth_hook_ad.open") + @patch("extauth_hook_AD.open") @patch("os.path.exists") @patch("os.remove") def test_ad_not_enabled(self, mock_remove, mock_exists, mock_open, mock_install): @@ -124,21 +135,12 @@ def test_permit_admin_user(self, mock_install): # Domain user with admin role should be included in config file user = build_user("CONNAPP", "CONAPPADA.LOCAL", "radmin", True) mock_session.xenapi.subject.get_record.return_value = user - dynamic = UsersList(mock_session, args_bd_pbis) + dynamic = UsersList(mock_session, args_bd_winbind) dynamic.apply() self.assertIn(r"CONNAPP\radmin", dynamic._lines) - self.assertIn(r"radmin@conappada.local", dynamic._lines) + self.assertIn(r"radmin@CONAPPADA.LOCAL", dynamic._lines) mock_install.assert_called() - def test_pbis_permit_admin_user_with_space(self, mock_install): - # Domain user name with space should be repalced by "+" with PBIS - user = build_user("CONNAPP", "conappada.local", "radmin l1", True) - mock_session.xenapi.subject.get_record.return_value = user - permit_user = r"CONNAPP\radmin++l1" - dynamic = UsersList(mock_session, args_bd_pbis) - dynamic.apply() - self.assertIn(permit_user, dynamic._lines) - mock_install.assert_called() def test_winbind_permit_admin_user_with_space(self, mock_install): # Domain user name with space should be surrounded by [] with winbind @@ -159,42 +161,8 @@ def test_not_permit_non_admin_user(self, mock_install): dynamic.apply() self.assertNotIn(permit_user, dynamic._lines) - def test_pbis_not_permit_pool_admin_with_plus_in_name(self, mock_install): - """ - Domain user name should not contain 
"+" - """ - user = build_user("CONNAPP", "conappada.local", "radm+in", True) - mock_session.xenapi.subject.get_record.return_value = user - permit_user = r"CONNAPP\radm+in" - dynamic = UsersList(mock_session, args_bd_pbis) - dynamic.apply() - self.assertNotIn(permit_user, dynamic._lines) - - def test_failed_to_add_one_admin_should_not_affact_others(self, mock_install): - """ - Failed to add one bad domain users should not affact others - """ - bad_user = build_user("CONNAPP", "conappada.local", "bad+in", True) - good_user = build_user("CONNAPP", "conappada.local", "good", True) - - mock_session_with_multi_users = MagicMock() - - subjects = ['OpaqueRef:96ae4be5-8815-4de8-a40f-d5e5c531dda9', - 'OpaqueRef:96ae4be5-8815-4de8-a40f-d5e5c531dda1'] - mock_session_with_multi_users.xenapi.subject.get_all.return_value = subjects - mock_session_with_multi_users.xenapi.subject.get_record.side_effect = [ - bad_user, good_user] - mock_session_with_multi_users.xenapi.role.get_by_name_label.return_value = admin_roles - - bad_user = r"CONNAPP\bad+in" - good_user = r"CONNAPP\good" - dynamic = UsersList(mock_session_with_multi_users, args_bd_pbis) - dynamic.apply() - self.assertIn(good_user, dynamic._lines) - self.assertNotIn(bad_user, dynamic._lines) - -@patch("extauth_hook_ad.ADConfig._install") +@patch("extauth_hook_AD.ADConfig._install") class TestGroups(TestCase): def test_permit_admin_group(self, mock_install): # Domain group with admin role should be included in config file @@ -224,7 +192,7 @@ def test_permit_admin_group_with_space(self, mock_install): self.assertIn(permit_group, dynamic._lines) -@patch("extauth_hook_ad.ADConfig._install") +@patch("extauth_hook_AD.ADConfig._install") class TestNssConfig(TestCase): def test_ad_not_enabled(self, mock_install): expected_config = "passwd: files sss" @@ -239,9 +207,9 @@ def test_ad_enabled(self, mock_install): self.assertTrue(line_exists_in_config(nss._lines, expected_config)) -@patch("extauth_hook_ad.run_cmd") -@patch("extauth_hook_ad.ADConfig._install") -@patch("extauth_hook_ad.open") +@patch("extauth_hook_AD.run_cmd") +@patch("extauth_hook_AD.ADConfig._install") +@patch("extauth_hook_AD.open") class TestSshdConfig(TestCase): def test_ad_not_enabled(self, mock_open, mock_install, mock_run_cmd): expected_config = "ChallengeResponseAuthentication no" diff --git a/scripts/poweron/IPMI.py b/python3/poweron/IPMI.py similarity index 100% rename from scripts/poweron/IPMI.py rename to python3/poweron/IPMI.py diff --git a/scripts/poweron/power-on.py b/python3/poweron/power-on.py similarity index 95% rename from scripts/poweron/power-on.py rename to python3/poweron/power-on.py index a76726a5019..7d1432e21c9 100644 --- a/scripts/poweron/power-on.py +++ b/python3/poweron/power-on.py @@ -3,6 +3,7 @@ # Example script which shows how to use the XenAPI to find a particular Host's management interface # and send it a wake-on-LAN packet. 
+import sys import syslog import time @@ -26,8 +27,8 @@ def waitForXapi(session, host): metrics = session.xenapi.host.get_metrics(host) try: finished = session.xenapi.host_metrics.get_live(metrics) - except: - pass + except Exception as e: + print(type(e).__name__, "occurred:", e, file=sys.stderr) return str(finished) diff --git a/scripts/poweron/wlan.py b/python3/poweron/wlan.py similarity index 96% rename from scripts/poweron/wlan.py rename to python3/poweron/wlan.py index 948ba9a5433..1506968c2bd 100755 --- a/scripts/poweron/wlan.py +++ b/python3/poweron/wlan.py @@ -65,6 +65,10 @@ def get_physical_pif(session, pif_ref): def wake_on_lan(session, host, remote_host_uuid): + """ + Attempt to wake up a machine by sending Wake-On-Lan packets encapsulated within UDP datagrams + sent to the broadcast_addr. + """ # Find this Host's management interface: this_pif = find_host_mgmt_pif(session, inventory.get_localhost_uuid()) # Find the name of the bridge to which it is connected: @@ -79,9 +83,6 @@ def wake_on_lan(session, host, remote_host_uuid): remote_pif = get_physical_pif(session, mgmt_pif) # Find the MAC address of the management interface: mac = session.xenapi.PIF.get_MAC(remote_pif) - - """Attempt to wake up a machine by sending Wake-On-Lan packets encapsulated within UDP datagrams - sent to the broadcast_addr.""" # A Wake-On-LAN packet contains FF:FF:FF:FF:FF:FF followed by 16 repetitions of the target MAC address bin_payload = bytes.fromhex("F" * 12 + mac.replace(":", "") * 16) diff --git a/python3/stubs/XenAPI.pyi b/python3/stubs/XenAPI.pyi new file mode 100644 index 00000000000..ede1e13d5f5 --- /dev/null +++ b/python3/stubs/XenAPI.pyi @@ -0,0 +1,90 @@ +""" +Stub for the XenAPI module: https://xapi-project.github.io/xen-api/overview.html +""" + + +import http.client as httplib +import xmlrpc.client as xmlrpclib +from _typeshed import Incomplete as Incomplete + +translation: Incomplete +API_VERSION_1_1: str +API_VERSION_1_2: str + + +class Failure(Exception): + details: Incomplete + + def __init__(self, details) -> None: ... + + +class UDSHTTPConnection(httplib.HTTPConnection): + sock: Incomplete + + def connect(self) -> None: ... + + +class UDSTransport(xmlrpclib.Transport): + def add_extra_header(self, key, value) -> None: ... + + # def make_connection(self, host) -> None: ... + + +def notimplemented(name, *args, **kwargs) -> None: ... + + +class _Dispatcher: + """A dispatcher for the Xen-API. It is used to call methods on the server""" + def __init__(self, API_version, send, name) -> None: ... + def __getattr__(self, name) -> None: ... + def __call__(self, *args) -> None: ... + def login_with_password(self, username, password, version, client_name) -> None: + """Authenticate the session with the XenAPI server.""" + def logout(self) -> None: + """End the session with the XenAPI server.""" + + # Dynamic attributes that type checkers like pytype and pyright cannot check: + session: Incomplete + secret: Incomplete + SR: Incomplete + VDI: Incomplete + PBD: Incomplete + pool: Incomplete + host: Incomplete + pool_update: Incomplete + VM: Incomplete + + +class Session(xmlrpclib.ServerProxy): + """A server proxy and session manager for communicating with xapi using + the Xen-API. 
+ + Example: + + session = Session('http://localhost/') + session.login_with_password('me', 'mypassword', '1.0', 'xen-api-scripts-xenapi.py') + session.xenapi.VM.start(vm_uuid) + session.xenapi.session.logout() + """ + + transport: Incomplete + last_login_method: Incomplete + last_login_params: Incomplete + API_version: Incomplete + xenapi: _Dispatcher + + def __init__( + self, + uri, + transport: Incomplete | None = ..., + encoding: Incomplete | None = ..., + verbose: int = ..., + allow_none: int = ..., + ignore_ssl: bool = ..., + ) -> None: ... + def xenapi_request(self, methodname, params) -> None: ... + + # def __getattr__(self, name) -> None: ... + + +def xapi_local() -> Session: ... diff --git a/python3/stubs/__init__.py b/python3/stubs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/python3/tests/stubs/dnf.py b/python3/stubs/dnf.py similarity index 100% rename from python3/tests/stubs/dnf.py rename to python3/stubs/dnf.py diff --git a/python3/stubs/xcp/branding.py b/python3/stubs/xcp/branding.py new file mode 100644 index 00000000000..30ff69600bf --- /dev/null +++ b/python3/stubs/xcp/branding.py @@ -0,0 +1,38 @@ +# Example xcp.branding module as test stub for test mail-alarm. +# python3/stubs is added to PYTHONPATH by pyproject.toml +COPYRIGHT_YEARS = '2009-2024' +PRODUCT_BRAND = 'XenServer' +PRODUCT_BRAND_DASHED = 'xenserver' +PRODUCT_NAME = 'xenenterprise' +COMPANY_NAME_LEGAL = 'Cloud Software Group, Inc.' +COMPANY_NAME_SHORT = 'Cloud Software Group' +COMPANY_DOMAIN = 'xenserver.com' +COMPANY_PRODUCT_BRAND = 'XenServer' +BRAND_CONSOLE = 'XenCenter' +BRAND_SERVER = 'XenServer Host' +BRAND_VDI = 'Virtual Desktops' +BRAND_CONSOLE_URL = 'https://www.xenserver.com/downloads' +ISO_PV_TOOLS_COPYRIGHT = 'Cloud Software Group, Inc. 2009-2024' +ISO_PV_TOOLS_LABEL = 'XenServer VM Tools' +COMPANY_NAME = 'Cloud Software Group, Inc.' +COMPANY = 'Cloud Software Group' +COMPANY_WEBSITE = 'www.xenserver.com' +PLATFORM_NAME = 'XCP' +PLATFORM_ORGANISATION = 'xen.org' +PLATFORM_WEBSITE = 'www.xen.org' +BRAND_GUEST = 'Virtual Machine' +BRAND_GUESTS = 'Virtual Machines' +BRAND_GUEST_SHORT = 'VM' +BRAND_GUESTS_SHORT = 'VMs' +BRAND_SERVERS = 'XenServer Hosts' +ISO_PV_TOOLS_PUBLISHER = 'Cloud Software Group, Inc.' +PRODUCT_MAJOR_VERSION = '8' +PRODUCT_MINOR_VERSION = '4' +PRODUCT_MICRO_VERSION = '0' +PRODUCT_VERSION_TEXT = '8' +PRODUCT_VERSION_TEXT_SHORT = '8' +PLATFORM_MAJOR_VERSION = '3' +PLATFORM_MINOR_VERSION = '4' +PLATFORM_MICRO_VERSION = '0' +PLATFORM_VERSION = '3.4.0' +PRODUCT_VERSION = '8.4.0' diff --git a/python3/stubs/xcp/cmd.pyi b/python3/stubs/xcp/cmd.pyi new file mode 100644 index 00000000000..950a6d28200 --- /dev/null +++ b/python3/stubs/xcp/cmd.pyi @@ -0,0 +1,13 @@ +from basedtyping import Untyped +from typing import Any +from xcp import logger as logger +from xcp.compat import open_defaults_for_utf8_text as open_defaults_for_utf8_text + +def runCmd(command: bytes | str | list[str], with_stdout: bool = False, with_stderr: bool = False, inputtext: bytes | str | None = None, **kwargs: Any) -> Any: ... + +class OutputCache: + cache: Untyped + def __init__(self): ... + def fileContents(self, fn, *args, **kwargs) -> Untyped: ... + def runCmd(self, command, with_stdout: bool = False, with_stderr: bool = False, inputtext: Untyped | None = None, **kwargs) -> Untyped: ... + def clearCache(self): ... 
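The `.pyi` stubs above declare only signatures: they exist so static checkers such as pytype and pyright can verify callers of the `XenAPI` and `xcp` APIs without the real, host-only packages being installed at check time. As a minimal sketch of the kind of caller code this enables (assuming, for illustration only, that `runCmd` returns an `(rc, stdout)` pair when called with `with_stdout=True` — the stub itself only promises `Any`):

```python
# Hypothetical caller checked against the xcp stubs above; not part of this PR.
from xcp import logger
from xcp.cmd import runCmd

# Assumption for this sketch: with_stdout=True yields an (rc, stdout) pair.
rc, stdout = runCmd(["uname", "-r"], with_stdout=True)
if rc == 0:
    logger.info("kernel: %s", stdout.strip())
else:
    logger.error("uname failed with exit code %d", rc)
```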
diff --git a/python3/stubs/xcp/compat.pyi b/python3/stubs/xcp/compat.pyi new file mode 100644 index 00000000000..bd2c7cfa4a6 --- /dev/null +++ b/python3/stubs/xcp/compat.pyi @@ -0,0 +1,9 @@ +from basedtyping import Untyped +from typing import Any, IO + +def open_textfile(filename: str, mode: str, encoding: str = 'utf-8', **kwargs: Any) -> IO[str]: ... + +open_utf8: Untyped + +def open_with_codec_handling(filename: str, mode: str = 'r', encoding: str = 'utf-8', **kwargs: Any) -> IO[Any]: ... +def open_defaults_for_utf8_text(args: tuple[Any, ...] | None, kwargs: Any) -> tuple[str, Any]: ... diff --git a/python3/stubs/xcp/logger.pyi b/python3/stubs/xcp/logger.pyi new file mode 100644 index 00000000000..0b42b05eb47 --- /dev/null +++ b/python3/stubs/xcp/logger.pyi @@ -0,0 +1,8 @@ +# Minimal stub for xcp.logger module +def debug(*al, **ad) -> None: ... +def info(*al, **ad) -> None: ... +def error(*al, **ad) -> None: ... +def warning(*al, **ad) -> None: ... +def critical(*al, **ad) -> None: ... +def logToStdout(level) -> bool: ... +def logToSyslog(level) -> bool: ... diff --git a/python3/tests/conftest.py b/python3/tests/conftest.py new file mode 100644 index 00000000000..d0a4777e1f1 --- /dev/null +++ b/python3/tests/conftest.py @@ -0,0 +1,10 @@ +"""python3/tests/conftest.py: Common pytest module for shared pytest fixtures""" +import pytest + +from .rootless_container import enter_private_mount_namespace + + +@pytest.fixture(scope="session") +def private_mount_namespace(): + """Enter a private mount namespace that allows us to test mount and unmount""" + return enter_private_mount_namespace() diff --git a/python3/tests/import_helper.py b/python3/tests/import_helper.py new file mode 100644 index 00000000000..2fdbd922b95 --- /dev/null +++ b/python3/tests/import_helper.py @@ -0,0 +1,70 @@ +"""helpers for unit-testing functions in scripts without permanent global mocks""" +import os +import sys +from contextlib import contextmanager +from types import ModuleType + +from typing import Generator +from mock import Mock + + +@contextmanager +def mocked_modules(*module_names: str) -> Generator[None, None, None]: + """Context manager that temporarily mocks the specified modules. + + :param module_names: Variable number of names of the modules to be mocked. + :yields: None + + During the context, the specified modules are added to the sys.modules + dictionary as Mock instances. + This effectively mocks the modules, allowing them to be imported and used + within the context. After the context, the mocked modules are removed + from the sys.modules dictionary. + + Example usage: + ```python + with mocked_modules("module1", "module2"): + # Code that uses the mocked modules + ``` + """ + for module_name in module_names: + sys.modules[module_name] = Mock() + yield + for module_name in module_names: + sys.modules.pop(module_name) + + +def import_file_as_module(relative_script_path): # type:(str) -> ModuleType + """Import a Python script without the .py extension as a python module. + + :param relative_script_path (str): The relative path of the script to import. + :returns module: The imported module. + :raises: AssertionError: If the spec or loader is not available. + + Note: + - Only Python 3 is supported: the script is loaded using importlib. + + Example: + - import_file_as_module('python3/libexec/mail-alarm') # Returns the imported module. 
+ """ + script_path = os.path.dirname(__file__) + "/../../" + relative_script_path + module_name = os.path.basename(script_path).replace(".py", "").replace("-", "_") + + # For Python 3.11+: Import Python script without the .py extension: + # https://gist.github.com/bernhardkaindl/1aaa04ea925fdc36c40d031491957fd3: + # pylint: disable-next=import-outside-toplevel + from importlib import ( # pylint: disable=no-name-in-module + machinery, + util, + ) + + loader = machinery.SourceFileLoader(module_name, script_path) + spec = util.spec_from_loader(module_name, loader) + assert spec + assert spec.loader + module = util.module_from_spec(spec) + sys.modules[module_name] = module + spec.loader.exec_module(module) + return module diff --git a/python3/tests/observer/__init__.py b/python3/tests/observer/__init__.py new file mode 100644 index 00000000000..dbdea4ed0d7 --- /dev/null +++ b/python3/tests/observer/__init__.py @@ -0,0 +1,36 @@ +""" +Package providing helper definitions and functions like call_observer() +to run python3/packages/observer.py as a script using runpy.run_path(). +""" + +import os +import runpy +import sys + +from typing import Any, Dict + +testdir = os.path.dirname(__file__) +OBSERVER_PY = os.path.relpath(testdir + "/../../packages/observer.py") +TRACED_SCRIPT = os.path.relpath(testdir + "/traced_script.py") +TRACED_SCRIPT_PRINT = "Hello, I am a print() in traced_script.py.\n" + + +def call_observer(*args: str) -> Dict[str, Any]: + """ + Call the observer.py script and return its globals dictionary for checking it + + Note: This is only possible when the script is run using runpy.run_path() + and the script exits normally (does not raise and Exception like SystemExit). + + Features: + - __name__ is set to "__main__", so the module is run as a script. + - sys.argv is set to the passed arguments + - no mocks are used, so the actual observer.py script is run. + - sets os.environ["OBSERVER_DEBUG"] = "True" to enable debug logging + to let the tests check the debug messages for checking the reading + of the configuration files and setting up tracing. + """ + + os.environ["XAPI_TEST"] = "True" # Enable printing debug messages in observer.py + sys.argv = [OBSERVER_PY, *args] + return runpy.run_path(OBSERVER_PY, run_name="__main__") diff --git a/python3/tests/observer/all.conf b/python3/tests/observer/all.conf new file mode 100644 index 00000000000..843d5d7cc72 --- /dev/null +++ b/python3/tests/observer/all.conf @@ -0,0 +1 @@ +module_names=XenAPI,tests.observer.traced_script \ No newline at end of file diff --git a/python3/tests/observer/it_handles_errors.py b/python3/tests/observer/it_handles_errors.py new file mode 100644 index 00000000000..efe58c56c76 --- /dev/null +++ b/python3/tests/observer/it_handles_errors.py @@ -0,0 +1,147 @@ +""" +Test error handing of python3/packages/observer.py, calling it using call_observer() + +This module contains tests for the error handling functionality of the observer.py +script in the python3/packages directory. + +The tests are executed by calling the observer.py script via the call_observer() +function. The primary focus of these tests is to verify the behavior of the observer.py +script when various errors occur. + +The tests included in this module are: + +1. `it_handles_not_finding_the_script`: + + This test verifies that when the observer.py does not find the script to trace + is not found, it exits with the correct exit code and produces the expected output. + +2. 
`it_prints_exception_traceback`: + + This test verifies that when the traced script raises an exception, the observer.py + script captures the exception traceback and exits with the correct exit code. + +3. `it_shows_the_usage_message`: + + This test verifies that when the observer.py script is called without any arguments, + it exits with the correct exit code and produces the expected output. + +4. `it_handles_error_exit`: + + This test verifies that when the traced script exits with a non-zero exit code, the + observer.py script captures the exit code and produces the expected output. + +5. `it_does_not_trace_without_config`: + + This test verifies that when observer.py is called without a configuration + file, it does not trace the traced script and produces the expected output. + +The tests are run using the pytest framework and are executed by calling the +call_observer() function, which simulates running the observer.py script from the +command line. +""" + +import os + +import pytest +from pytest import CaptureFixture + +from . import OBSERVER_PY, TRACED_SCRIPT, TRACED_SCRIPT_PRINT, call_observer + + +def it_handles_not_finding_the_script(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, + and the traced script is not found: + - The test checks that the exit code and the captured output are as expected. + """ + nonexisting_script = "nonexisting_traced_script.py" + with pytest.raises(SystemExit) as exc_info: + call_observer(nonexisting_script, "arg") + + assert exc_info.value.code == 2 # Set as the exit code for a missing script + + # Check that the error message is as expected + with capsys.disabled(): + stderr = capsys.readouterr().err.splitlines() + assert stderr[0] == f"{OBSERVER_PY} {nonexisting_script} arg:" + assert stderr[1] == f"Script not found: {os.getcwd()}/{nonexisting_script}" + + +def it_prints_exception_traceback(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, + and an invalid argument is passed to the traced script: + + - The traced script should raise an exception and exit with 139 + - The test checks that the exit code and the captured output are as expected. + """ + with pytest.raises(SystemExit) as exc_info: + call_observer(TRACED_SCRIPT, "not_an_int") + + # 139 is used as the exit code when an Exception in the traced script was caught + assert exc_info.value.code == 139 + + # Check that the error message is as expected + with capsys.disabled(): + stderr = capsys.readouterr().err.splitlines() + assert stderr[0] == f"{OBSERVER_PY} {TRACED_SCRIPT} not_an_int:" + assert stderr[1] == "Exception in the traced script:" + assert stderr[2] == "invalid literal for int() with base 10: 'not_an_int'" + assert stderr[3] == "Traceback (most recent call last):" + + +def it_shows_the_usage_message(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started as a script without any arguments: + - The test checks that the exit code and the captured output are as expected. 
+ """ + + with pytest.raises(SystemExit) as exc_info: + call_observer() + assert exc_info.value.code == 31 + with capsys.disabled(): + stderr = capsys.readouterr().err + assert stderr == f"{OBSERVER_PY}: usage: command argument list\n" + + +def it_handles_error_exit(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, + and the traced script exits with a non-zero exit code: + - The expected exit code is passed to to the traced script as its argument. + - The traced script should print a message and exit with the given exit code. + - The test checks that the exit code and the captured output are as expected. + """ + + # Passing 1 to the traced script will make it print() and exit with code 1 + with pytest.raises(SystemExit) as exc_info: + call_observer(TRACED_SCRIPT, "1") + assert exc_info.value.code == 1 + with capsys.disabled(): + assert capsys.readouterr().out == TRACED_SCRIPT_PRINT + + +def it_does_not_trace_without_config(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started without a configuration file: + + - The expected exit code is passed to to the traced script as its argument. + - The traced script should print a message and exit with 0 + - The test checks that the exit code and the captured output are as expected. + """ + + # Prepare the environment and run the observer.py script + os.environ["OBSERVER_CONFIG_DIR"] = "nonexisting_config_directory" + + # Passing 0 to the traced script will make it print() and exit with code 0 + globs = call_observer(TRACED_SCRIPT, "0") + + with capsys.disabled(): + assert capsys.readouterr().out == TRACED_SCRIPT_PRINT + + # Check that the observer.py script didn't install the tracing functions + span = globs.get("span") + patch_module = globs.get("patch_module") + assert span and patch_module + assert span.__name__ == "_span_noop" + assert patch_module.__name__ == "_patch_module_noop" diff --git a/python3/tests/observer/it_traces.py b/python3/tests/observer/it_traces.py new file mode 100644 index 00000000000..99179c85a93 --- /dev/null +++ b/python3/tests/observer/it_traces.py @@ -0,0 +1,104 @@ +""" +Test that packages/observer.py, creates a tracer, calling it using call_observer() + +The tests included in this module are: + +1. `it_creates_a_tracer`: + + This test verifies that when the observer.py script is called with a configuration + file, it creates a tracer and sets the span and patch_module functions as expected. + +The tests are run using the pytest framework and are executed by calling the +call_observer() function, which simulates running the observer.py script from the +command line. + +The test directory contains a dummy `observer.conf` (currently empty) configuration +file that is used to enable tracing for the test. +""" + +import os +import types +from typing import Any, Dict + +from pytest import CaptureFixture, LogCaptureFixture + +from . 
import TRACED_SCRIPT, TRACED_SCRIPT_PRINT, call_observer, testdir + + +def assert_imported_modules(globals_dict_of_observer: Dict[str, Any]): + """Assert that the expected modules were imported by observer.py""" + + observer_modules = globals_dict_of_observer["sys"].modules + imported_modules = [ + "opentelemetry.baggage.propagation", + "opentelemetry.context", + "opentelemetry.exporter.zipkin.json", + "opentelemetry.sdk.resources", + "opentelemetry.sdk.trace.export", + "opentelemetry.trace", + ] + assert all(mod in observer_modules for mod in imported_modules) + + +def it_creates_a_tracer(caplog: LogCaptureFixture, capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, it: + - imports the opentelemetry packages [checked by this test] + - reads the configuration file [checked by this test] + - creates a tracer [checked by this test (using caplog)] + - sets the span() and patch_module() [checked by this test] + - runs the traced script [checked by this test] + - traces the script [not yet checked by this test] + """ + os.environ["OBSERVER_CONFIG_DIR"] = os.path.dirname(__file__) + + # Passing 0 to the traced script will make it print() and exit with code 0 + globals_dict_of_observer = call_observer(TRACED_SCRIPT, "0") + + with capsys.disabled(): + # If this test fails in your environment without any changes to the repo, + # check for import errors from observer.py:_init_tracing() in the pytest logs. + + # Get the span and patch_module functions from the module's globals + span = globals_dict_of_observer.get("span") + patch_module = globals_dict_of_observer.get("patch_module") + + # Assert that the span and patch_module are functions + assert callable(span) + assert callable(patch_module) + assert isinstance(span, types.FunctionType) + assert isinstance(patch_module, types.FunctionType) + + # Assert that span and patch_module are the expected tracing functions + assert span.__name__ == "span_of_tracers" + assert span.__qualname__ == "_init_tracing.<locals>.span_of_tracers" + assert patch_module.__name__ == "_patch_module" + assert patch_module.__qualname__ == "_init_tracing.<locals>._patch_module" + + # Assert that the captured output is as expected + assert capsys.readouterr().out == TRACED_SCRIPT_PRINT + + assert_imported_modules(globals_dict_of_observer) + assert_debug_logs(caplog) + + +def assert_debug_logs(caplog: LogCaptureFixture): + """ + Assert that the observer.py script read the configuration file all.conf + by expecting the configuration file and its content in the log messages. 
+ """ + + msg = caplog.messages + if not msg: # pragma: no cover + print("No logs found in caplog, check that debug logging is enabled!") + expected_modules = "{'module_names': 'XenAPI,tests.observer.traced_script'}" + assert msg[1] == f"{testdir}/all.conf: {expected_modules}" + assert msg[2] == "module_names: ['XenAPI', 'tests.observer.traced_script']" + + # Assert that the observer.py script red the observer.conf configuration file + config = """{'otel_resource_attributes': '"key1=value1,key2=value2"'}""" + assert msg[0] == f"configs = ['{testdir}/observer.conf']" + assert msg[3] == f"{testdir}/observer.conf: {config}" + + # Assert that the observer.py script created a tracer + assert msg[4].startswith("tracers=[ "InstrumentMe": + """A method to be traced by packages/observer.py as part of tests""" + + print("Hello, I am a print() in traced_script.py.") + return self + + def return_int(self, return_int: str) -> int: + """A method to be traced by packages/observer.py as part of tests""" + return int(return_int) + + +def main(return_code_string: str) -> int: + """Main of the tested script, to be traced by packages/observer.py.""" + + return InstrumentMe().print().return_int(return_code_string) + + +if __name__ == "__main__": + # Only use sys.exit(ret) raising SystemExit if the return code is not 0 + # to allow test_observer_as_script() to get the globals of observer.py: + ret = main(sys.argv[-1]) + if ret: + sys.exit(ret) diff --git a/python3/tests/rootless_container.py b/python3/tests/rootless_container.py new file mode 100644 index 00000000000..30ff364ace3 --- /dev/null +++ b/python3/tests/rootless_container.py @@ -0,0 +1,83 @@ +"""rootless_container.py: Create a rootless container on any Linux and GitHub CI""" +import ctypes +import os + +# Unshare the user namespace, so that the calling process is moved into a new +# user namespace which is not shared with any previously existing process. +# Needed so that the current user id can be mapped to 0 for getting a new +# mount namespace. +CLONE_NEWUSER = 0x10000000 +# Unshare the mount namespace, so that the calling process has a private copy +# of its root directory namespace which is not shared with any other process: +CLONE_NEWNS = 0x00020000 +# Flags for mount(2): +MS_BIND = 4096 +MS_REC = 16384 +MS_PRIVATE = 1 << 18 + + +def unshare(flags): # type:(int) -> None + """Wrapper for the library call to unshare Linux kernel namespaces""" + lib = ctypes.CDLL(None, use_errno=True) + lib.unshare.argtypes = [ctypes.c_int] + rc = lib.unshare(flags) + if rc != 0: # pragma: no cover + errno = ctypes.get_errno() + raise OSError(errno, os.strerror(errno), flags) + + +def mount(source="none", target="", fs="", flags=0, options=""): + # type:(str, str, str, int, str) -> None + """Wrapper for the library call mount(). 
Supports Python2.7 and Python3.x""" + lib = ctypes.CDLL(None, use_errno=True) + lib.mount.argtypes = ( + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.c_ulong, + ctypes.c_char_p, + ) + result = lib.mount( + source.encode(), target.encode(), fs.encode(), flags, options.encode() + ) + if result < 0: # pragma: no cover + errno = ctypes.get_errno() + raise OSError( + errno, + "mount " + target + " (" + options + "): " + os.strerror(errno), + ) + + +def umount(target): # type:(str) -> None + """Wrapper for the Linux umount system call, supports Python2.7 and Python3.x""" + lib = ctypes.CDLL(None, use_errno=True) + result = lib.umount(ctypes.c_char_p(target.encode())) + if result < 0: # pragma: no cover + errno = ctypes.get_errno() + raise OSError(errno, "umount " + target + ": " + os.strerror(errno)) + + +def enter_private_mount_namespace(): + """Enter a private mount and user namespace and simulate uid 0 for the user + + Some code like mount() needs to be run as root. The container simulates + root-like privileges and a new mount namespace that allows mount() in it. + + Implements the equivalent of `/usr/bin/unshare --map-root-user --mount` + """ + + # Read the actual user and group ids before entering the new user namespace: + real_uid = os.getuid() + real_gid = os.getgid() + unshare(CLONE_NEWUSER | CLONE_NEWNS) + # Setup user map to map the user id to behave like uid 0: + with open("/proc/self/uid_map", "wb") as proc_self_user_map: + proc_self_user_map.write(b"0 %d 1" % real_uid) + with open("/proc/self/setgroups", "wb") as proc_self_set_groups: + proc_self_set_groups.write(b"deny") + # Setup group map for the user's gid to behave like gid 0: + with open("/proc/self/gid_map", "wb") as proc_self_group_map: + proc_self_group_map.write(b"0 %d 1" % real_gid) + # Private root mount in the mount namespace to support mounting a private tmpfs: + mount(target="/", flags=MS_REC | MS_PRIVATE) + return True diff --git a/python3/tests/test_dnf_plugins.py b/python3/tests/test_dnf_plugins.py index c7d5f587532..2f82b1eb5cb 100644 --- a/python3/tests/test_dnf_plugins.py +++ b/python3/tests/test_dnf_plugins.py @@ -3,6 +3,7 @@ import sys import json from unittest.mock import MagicMock, patch +from python3.tests.import_helper import import_file_as_module sys.modules["urlgrabber"] = MagicMock() @@ -14,8 +15,8 @@ # Some test case does not use self -from dnf_plugins import accesstoken -from dnf_plugins import ptoken +accesstoken = import_file_as_module("python3/dnf_plugins/accesstoken.py") +ptoken = import_file_as_module("python3/dnf_plugins/ptoken.py") REPO_NAME = "testrepo" @@ -31,7 +32,7 @@ def _mock_repo(a_token=None, p_token=None, baseurl=None): return mock_repo -@patch("dnf_plugins.accesstoken.urlgrabber") +@patch("accesstoken.urlgrabber") class TestAccesstoken(unittest.TestCase): """Test class for dnf access plugin""" @@ -74,7 +75,8 @@ class TestPtoken(unittest.TestCase): """Test class for ptoken dnf plugin""" def test_failed_to_open_ptoken_file(self): """Exception should raised if the system does not have PTOKEN_PATH""" - ptoken.PTOKEN_PATH = "/some/not/exist/path" + # Disable pyright warning as we need to set the PTOKEN_PATH to test the exception + ptoken.PTOKEN_PATH = "/some/not/exist/path" # pyright: ignore[reportAttributeAccessIssue] with self.assertRaises(Exception): ptoken.Ptoken(MagicMock(), MagicMock()).config() diff --git a/python3/tests/test_hfx_filename.py b/python3/tests/test_hfx_filename.py new file mode 100644 index 00000000000..ca3618f38c2 --- /dev/null +++ 
b/python3/tests/test_hfx_filename.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +This module provides unittest for hfx_filename +""" + +import sys +import unittest +from mock import MagicMock, patch, call +from python3.tests.import_helper import import_file_as_module + +# mock modules to avoid dependencies +sys.modules["XenAPI"] = MagicMock() + +hfx_filename = import_file_as_module("python3/bin/hfx_filename") + + +@patch("socket.socket") +class TestRpc(unittest.TestCase): + """ + This class tests the functions below: + rpc() + db_get_by_uuid() + read_field() + """ + def test_rpc(self, mock_socket): + """ + Tests rpc + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + recv_data = b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHelloWorld" + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + session_id = 0 + request = "socket request" + body = hfx_filename.rpc(session_id, request) + + # Assert that the socket methods were called as expected + expected_data = [ + b"POST /remote_db_access?session_id=0 HTTP/1.0\r\n", + b"Connection:close\r\n", + b"content-length:14\r\n", + b"\r\n", + b"socket request" + ] + mock_connected_socket.send.assert_has_calls([call(data) for data in expected_data]) + + expected_return = "HelloWorld" + self.assertEqual(expected_return, body) + + def test_rpc_international_character(self, mock_socket): + """ + Tests rpc using non-ascii characters + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + recv_data = b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHelloWorld" + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + session_id = 0 + # Use international character "socket 请求" as request + request = "socket 请求" + body = hfx_filename.rpc(session_id, request) + + # Assert that the socket methods were called as expected + expected_data = [ + b"POST /remote_db_access?session_id=0 HTTP/1.0\r\n", + b"Connection:close\r\n", + b"content-length:13\r\n", + b"\r\n", + request.encode('utf-8') + ] + mock_connected_socket.send.assert_has_calls([call(data) for data in expected_data]) + + expected_return = "HelloWorld" + self.assertEqual(expected_return, body) + + def test_db_get_uuid(self, mock_socket): + """ + Tests db_get_uuid + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + header = "HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n" + body = ("<value><array><data><value>success</value><value>HelloWorld</value>" + "</data></array></value>") + recv_data = (header + body).encode('utf-8') + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + expected_response = "HelloWorld" + response = hfx_filename.db_get_by_uuid(0, "pool_patch", "22345") + self.assertEqual(expected_response, response) + + def test_read_field(self, mock_socket): + """ + Tests read_field + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + header = "HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n" + body = ("<value><array><data><value>success</value><value>file_name</value>" + "</data></array></value>") + recv_data = (header + body).encode('utf-8') + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + expected_filename = "file_name" + filename = hfx_filename.read_field(0, "pool_patch", "filename", "rf") + self.assertEqual(expected_filename, filename) + + +class TestParse(unittest.TestCase): + """ + This class tests the 
function parse_string() + """ + def test_parse_string(self): + """ + Tests parse_string + """ + txt = ("<value><array><data><value>success</value><value>abcde</value>" + "</data></array></value>") + expected_txt = "abcde" + return_txt = hfx_filename.parse_string(txt) + self.assertEqual(expected_txt, return_txt) diff --git a/scripts/test_mail-alarm.py b/python3/tests/test_mail-alarm.py similarity index 92% rename from scripts/test_mail-alarm.py rename to python3/tests/test_mail-alarm.py index 2a918f5edbe..c1d225eeac2 100644 --- a/scripts/test_mail-alarm.py +++ b/python3/tests/test_mail-alarm.py @@ -2,22 +2,13 @@ # test_mail-alarm.py: uses unittest to test script "mail-alarm" # -import tempfile -import os -import shutil import sys import unittest -import mock +from unittest import mock -def nottest(obj): - obj.__test__ = False - return obj - -sys.path.append("./scripts/examples/python") -sys.modules["xcp"] = mock.Mock() - -log_file_global = None +from python3.tests.import_helper import import_file_as_module, mocked_modules +log_strs = "" XML_MESSAGE_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?><message><generation>63102</generation><ref>OpaqueRef:46be74f4-3a26-31a8-a629-d52584fe6ed3</ref><name>{alarm}</name><priority>3</priority><cls>{cls}</cls><obj_uuid>2e00443d-ac29-4940-8433-a15dda1e8f8e</obj_uuid><timestamp>20170516T16:30:00Z</timestamp><uuid>0d985f5e-6d91-3410-f853-040d0906a4b9</uuid><body>{body}</body></message>""" @@ -52,28 +43,16 @@ def get_alarm_xml(xmlalarm_str, xmlcls_str, xmlname_str, xmlbody_str): def log_err(err): - global log_file_global - with open(log_file_global, "a+") as fileh: - fileh.write("%s: %s\n" % (sys.argv[0], err)) + global log_strs # pylint: disable=global-statement + log_strs = log_strs + "%s: %s\n" % (sys.argv[0], err) + + +with mocked_modules("xcp"): + mailalarm = import_file_as_module("python3/libexec/mail-alarm") + mock_setup(mailalarm) class TestXapiMessage(unittest.TestCase): - def setUp(self): - global log_file_global - try: - self.work_dir = tempfile.mkdtemp(prefix="test-mail-alarm-") - log_file_global = os.path.join(self.work_dir, "user.log") - src_file = "./scripts/mail-alarm" - dst_file = os.path.join(self.work_dir, "mailalarm.py") - shutil.copyfile(src_file, dst_file) - sys.path.append(self.work_dir) - except: - raise - - def tearDown(self): - shutil.rmtree(self.work_dir, ignore_errors=True) - - @nottest def common_test_good_input( self, xmlalarm_str, @@ -83,11 +62,6 @@ def common_test_good_input( body_str, xmlbody_str=XML_BODY_COMMON, ): - import mailalarm - - # Emulate functions with Mock - mock_setup(mailalarm) - session = mock.Mock() tst_xml = get_alarm_xml(xmlalarm_str, xmlcls_str, xmlname_str, xmlbody_str) @@ -100,7 +74,6 @@ def common_test_good_input( self.assertIn(subject_str, mail_subject) self.assertIn(body_str, mail_body) - @nottest def common_test_bad_input( self, xmlalarm_str, @@ -110,12 +83,6 @@ def common_test_bad_input( subtitle_str, xmlbody_str=XML_BODY_COMMON, ): - global log_file_global - import mailalarm - - # Emulate functions with Mock - mock_setup(mailalarm) - session = mock.Mock() tst_xml = get_alarm_xml(xmlalarm_str, xmlcls_str, xmlname_str, xmlbody_str) @@ -124,9 +91,11 @@ def common_test_bad_input( mail_subject = obj_XapiMessage.generate_email_subject() mail_body = obj_XapiMessage.generate_email_body() + assert mail_subject and mail_body # They're tested by test_good_mail_language() - with open(log_file_global, "r") as fileh: - log_strs = fileh.read() + # Assert the logged error messages for the bad language pack that are + # recorded in `log_strs` by `log_err()` when the language pack is not found + # by `generate_email_subject()` and `generate_email_body()`: self.assertIn("Read mail language pack error", log_strs) self.assertIn( @@ -142,7 +111,6 @@ def common_test_bad_input( log_strs, ) - 
os.remove(log_file_global) def test_good_mail_language(self): ## Test cpu_usage alarm diff --git a/python3/tests/test_nbd_client_manager.py b/python3/tests/test_nbd_client_manager.py new file mode 100644 index 00000000000..224a1c3e2ea --- /dev/null +++ b/python3/tests/test_nbd_client_manager.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python3 +""" +This module provides unittest for nbd_client_manager.py +""" + +import unittest +import subprocess +from mock import MagicMock, patch, mock_open, call +from python3.tests.import_helper import import_file_as_module + +nbd_client_manager = import_file_as_module("python3/libexec/nbd_client_manager.py") + +@patch('subprocess.Popen') +class TestCallFunction(unittest.TestCase): + + def test_call_success(self, mock_popen): + mock_process = mock_popen.return_value + mock_process.communicate.return_value = ("ls -l output", "") + mock_process.returncode = 0 + + returncode = nbd_client_manager._call(["ls", "-l"]) + + self.assertEqual(returncode, 0) + + def test_call_failure(self, mock_popen): + mock_process = mock_popen.return_value + mock_process.communicate.return_value = ("", "err") + mock_process.returncode = 1 + + with self.assertRaises(subprocess.CalledProcessError) as cm: + nbd_client_manager._call(["invalid_cmd"]) + + self.assertEqual(cm.exception.returncode, 1) + +@patch('nbd_client_manager.os.path.exists') +class TestIsNbdDeviceConnected(unittest.TestCase): + + @patch('nbd_client_manager._call') + def test_nbd_device_connected(self, mock_call, mock_exists): + mock_exists.return_value = True + mock_call.return_value = 0 + + result = nbd_client_manager._is_nbd_device_connected('/dev/nbd0') + + self.assertTrue(result) + mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd0"], error=False) + + @patch('nbd_client_manager._call') + def test_nbd_device_not_connected(self, mock_call, mock_exists): + mock_exists.return_value = True + mock_call.return_value = 1 + + result = nbd_client_manager._is_nbd_device_connected('/dev/nbd1') + + self.assertFalse(result) + mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd1"], error=False) + + def test_nbd_device_not_found(self, mock_exists): + mock_exists.return_value = False + + # Testing the function with a non-existent device + with self.assertRaises(nbd_client_manager.NbdDeviceNotFound): + nbd_client_manager._is_nbd_device_connected('/dev/nbd2') + +@patch('nbd_client_manager._is_nbd_device_connected') +class TestFindUnusedNbdDevice(unittest.TestCase): + def test_find_unused_nbd_device(self, mock_is_nbd_device_connected): + # Mocking the function to return True for /dev/nbd0 and False for /dev/nbd1 + mock_is_nbd_device_connected.side_effect = [True, False] + + # Testing the function + unused_device = nbd_client_manager._find_unused_nbd_device() + + # Assertion + self.assertEqual(unused_device, "/dev/nbd1") + + def test_no_unused_nbd_device(self, mock_is_nbd_device_connected): + # Mocking the function to always raise NbdDeviceNotFound + mock_is_nbd_device_connected.side_effect = nbd_client_manager.NbdDeviceNotFound('/dev/nbd1') + + # Testing the function when no unused devices are found + with self.assertRaises(nbd_client_manager.NbdDeviceNotFound): + nbd_client_manager._find_unused_nbd_device() + +@patch('nbd_client_manager._is_nbd_device_connected') +class TestWaitForNbdDevice(unittest.TestCase): + def test_wait_for_nbd_device_connected(self, mock_is_nbd_device_connected): + mock_is_nbd_device_connected.return_value = True + nbd_client_manager._wait_for_nbd_device('/dev/nbd0', 
connected=True) + mock_is_nbd_device_connected.assert_called_once_with(nbd_device='/dev/nbd0') + + def test_wait_for_nbd_device_disconnected(self, mock_is_nbd_device_connected): + mock_is_nbd_device_connected.return_value = False + nbd_client_manager._wait_for_nbd_device('/dev/nbd1', connected=False) + mock_is_nbd_device_connected.assert_called_once_with(nbd_device='/dev/nbd1') + +class TestGetPersistentConnectInfoFilename(unittest.TestCase): + def test_get_persistent_connect_info_filename(self): + # Test for device /dev/nbd0 + device = "/dev/nbd0" + expected_filename = "/var/run/nonpersistent/nbd/0" + self.assertEqual(nbd_client_manager._get_persistent_connect_info_filename(device), + expected_filename) + +@patch('nbd_client_manager.os.makedirs') +@patch('nbd_client_manager.os.path.exists') +class TestPersistConnectInfo(unittest.TestCase): + + def test_persist_connect_info(self, mock_exists, mock_makedirs): + mock_exists.return_value = False + + # Test data + device = "/dev/nbd0" + path = "/some/path" + exportname = "example_export" + + # Setting up mock for file write + mock_file = mock_open() + with patch('builtins.open', mock_file): + # Run the function + nbd_client_manager._persist_connect_info(device, path, exportname) + + # Assertions + mock_makedirs.assert_called_once_with(nbd_client_manager.PERSISTENT_INFO_DIR) + mock_file.assert_called_once_with('/var/run/nonpersistent/nbd/0', 'w', encoding='utf-8') + mock_file().write.assert_called_once_with( + '{"path": "/some/path", "exportname": "example_export"}' + ) + + def test_persist_connect_info_directory_exists(self, mock_exists, mock_makedirs): + mock_exists.return_value = True + + # Test data + device = "/dev/nbd0" + path = "/some/path" + exportname = "example_export" + + # Setting up mock for file write + mock_file = mock_open() + with patch('builtins.open', mock_file): + # Run the function + nbd_client_manager._persist_connect_info(device, path, exportname) + + # Assertions + mock_makedirs.assert_not_called() + mock_file.assert_called_once_with('/var/run/nonpersistent/nbd/0', 'w', encoding='utf-8') + mock_file().write.assert_called_once_with( + '{"path": "/some/path", "exportname": "example_export"}' + ) + +class TestRemovePersistentConnectInfo(unittest.TestCase): + @patch('nbd_client_manager.os.remove') + def test_remove_persistent_connect_info(self, mock_os_remove): + nbd_client_manager._remove_persistent_connect_info('/dev/nbd0') + mock_os_remove.assert_called_once_with('/var/run/nonpersistent/nbd/0') + +class TestConnectNbd(unittest.TestCase): + @patch('nbd_client_manager._call') + @patch('nbd_client_manager._find_unused_nbd_device') + @patch('nbd_client_manager._wait_for_nbd_device') + @patch('nbd_client_manager._persist_connect_info') + @patch('nbd_client_manager.open') + @patch('nbd_client_manager.FILE_LOCK', MagicMock()) # Mocking FILE_LOCK + # pylint: disable=too-many-arguments + def test_connect_nbd(self, mock_openfile, mock_persist_info, + mock_wait_for_nbd, mock_find_unused, mock_call): + # Mocking necessary functions and file operations + mock_find_unused.return_value = "/dev/nbd0" + mock_call.return_value = 0 + mock_file_scheduler = MagicMock() + mock_file_max_sectors_kb = MagicMock() + mock_file_nr_requests = MagicMock() + mock_openfile.side_effect = [mock_file_scheduler, + mock_file_max_sectors_kb, + mock_file_nr_requests] + + # Testing the function + result = nbd_client_manager.connect_nbd("/path/of/socket/file", "export_name") + + # Assertions + self.assertEqual(result, "/dev/nbd0") + 
mock_find_unused.assert_called_once() + mock_call.assert_called() + mock_wait_for_nbd.assert_called_once_with(nbd_device="/dev/nbd0", connected=True) + mock_persist_info.assert_called_once_with( + "/dev/nbd0", "/path/of/socket/file", "export_name" + ) + # Checking open calls + mock_openfile.assert_has_calls([ + call("/sys/block/nbd0/queue/scheduler", "w", encoding="utf-8"), + call("/sys/block/nbd0/queue/max_sectors_kb", "w", encoding="utf-8"), + call("/sys/block/nbd0/queue/nr_requests", "w", encoding="utf-8") + ], any_order=True) + +@patch('nbd_client_manager._is_nbd_device_connected') +@patch('nbd_client_manager._remove_persistent_connect_info') +@patch('nbd_client_manager._call') +@patch('nbd_client_manager._wait_for_nbd_device') +class TestDisconnectNbdDevice(unittest.TestCase): + + def test_disconnect_nbd_device_connected(self, mock_wait_for_nbd, + mock_call, mock_remove_persistent, mock_is_connected): + # Mocking _is_nbd_device_connected to return True + mock_is_connected.return_value = True + + # Testing the function when device is connected + nbd_client_manager.disconnect_nbd_device("/dev/nbd0") + + # Assertions + mock_is_connected.assert_called_once_with(nbd_device="/dev/nbd0") + mock_remove_persistent.assert_called_once_with("/dev/nbd0") + mock_call.assert_called_once_with(["nbd-client", "-disconnect", "/dev/nbd0"]) + mock_wait_for_nbd.assert_called_once_with(nbd_device="/dev/nbd0", connected=False) + + def test_disconnect_nbd_device_disconnected(self, mock_wait_for_nbd, mock_call, + mock_remove_persistent, mock_is_connected): + # Mocking _is_nbd_device_connected to return False + mock_is_connected.return_value = False + + # Testing the function when device is already disconnected + nbd_client_manager.disconnect_nbd_device("/dev/nbd0") + + # Assertions + mock_is_connected.assert_called_once_with(nbd_device="/dev/nbd0") + mock_remove_persistent.assert_not_called() + mock_call.assert_not_called() + mock_wait_for_nbd.assert_not_called() + + def test_disconnect_nbd_device_not_found(self, mock_wait_for_nbd, mock_call, + mock_remove_persistent, mock_is_connected): + # Mocking _is_nbd_device_connected to raise NbdDeviceNotFound + mock_is_connected.side_effect = nbd_client_manager.NbdDeviceNotFound('/dev/nbd0') + + # Testing the function when device is not found + nbd_client_manager.disconnect_nbd_device("/dev/nbd0") + + # Assertions + mock_is_connected.assert_called_once_with(nbd_device="/dev/nbd0") + mock_remove_persistent.assert_not_called() + mock_call.assert_not_called() + mock_wait_for_nbd.assert_not_called() diff --git a/python3/tests/test_observer.py b/python3/tests/test_observer.py index 088183d5375..a8d6f238eec 100644 --- a/python3/tests/test_observer.py +++ b/python3/tests/test_observer.py @@ -4,13 +4,13 @@ import sys import unittest -from mock import MagicMock, mock_open, patch +from unittest.mock import MagicMock, mock_open, patch # Ensure observer is initialised as noop with patch("os.listdir") as mock_listdir: # Prevent it finding an observer.conf mock_listdir.return_value = [] - from packages import observer + from python3.packages import observer TEST_CONFIG = """ XS_EXPORTER_BUGTOOL_ENDPOINT='/var/log/dt/test' @@ -18,7 +18,7 @@ OTEL_RESOURCE_ATTRIBUTES='service.name=sm' """ TEST_OBSERVER_CONF = "test-observer.conf" -OBSERVER_OPEN = "packages.observer.open" +OBSERVER_OPEN = "python3.packages.observer.open" # # These are the modules that are mocked to avoid dependencies. 
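
The new test modules in this series load the scripts under test through import_file_as_module from python3/tests/import_helper, which also has to register the loaded script in sys.modules so that string patch targets such as @patch('nbd_client_manager._call') can resolve. The helper's actual implementation is not part of this diff; the following is only a minimal sketch of such a loader, assuming nothing beyond standard-library importlib machinery:

    # Illustrative sketch only: the real helper lives in
    # python3/tests/import_helper.py and may differ.
    import sys
    from types import ModuleType
    from importlib import util
    from importlib.machinery import SourceFileLoader

    def import_file_as_module(relative_path: str) -> ModuleType:
        """Load a script (even one without a .py suffix) as a module."""
        # Derive a module name such as "nbd_client_manager" from the path:
        name = relative_path.rsplit("/", 1)[-1]
        name = name.removesuffix(".py").replace("-", "_")
        loader = SourceFileLoader(name, relative_path)
        spec = util.spec_from_loader(name, loader)
        assert spec is not None
        module = util.module_from_spec(spec)
        # Register before executing, so @patch("module_name.attr") resolves:
        sys.modules[name] = module
        loader.exec_module(module)
        return module

SourceFileLoader compiles whatever file the path points at, which is why this approach also works for scripts shipped without a .py suffix, such as python3/bin/static-vdis.
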
diff --git a/python3/tests/test_perfmon.py b/python3/tests/test_perfmon.py new file mode 100644 index 00000000000..c133a1171ac --- /dev/null +++ b/python3/tests/test_perfmon.py @@ -0,0 +1,602 @@ +#!/usr/bin/env python3 +""" +This module provides unittest for perfmon +""" + +# pyright: reportAttributeAccessIssue=false + +import sys +import math +import unittest +from mock import MagicMock, patch, mock_open +from python3.tests.import_helper import import_file_as_module + +# mock modules to avoid dependencies +sys.modules["XenAPI"] = MagicMock() + +perfmon = import_file_as_module("python3/bin/perfmon") + + +@patch("subprocess.getoutput") +class TestGetFsUsage(unittest.TestCase): + '''Test get_percent_log_fs_usage and get_percent_fs_usage''' + def mock_subprocess_getoutput(self, cmd): + df_etc_passwd = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda1 18402132 2244748 15213668 13% / + """ + df_var_log = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda5 4054752 59820 3785220 2% /var/log + """ + if cmd == "df /etc/passwd": + return df_etc_passwd + if cmd == "df /var/log": + return df_var_log + return None + + def mock_subprocess_getoutput_same_file_system(self, cmd): + df_etc_passwd = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda5 18402132 2244748 15213668 13% / + """ + df_var_log = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda5 4054752 59820 3785220 2% /var/log + """ + if cmd == "df /etc/passwd": + return df_etc_passwd + if cmd == "df /var/log": + return df_var_log + return None + + def test_get_percent_log_fs_usage(self, mock_getoutput): + """Assert that get_percent_log_fs_usage returns as expected""" + mock_getoutput.side_effect = self.mock_subprocess_getoutput + + expected_percentage = 0.02 + test_percentage = perfmon.get_percent_log_fs_usage(None) + self.assertAlmostEqual(test_percentage, expected_percentage, 7) + + def test_get_percent_log_fs_usage_same_file_system(self, mock_getoutput): + """Test where /etc/passwd and /var/log are in the same filesystem""" + mock_getoutput.side_effect = self.mock_subprocess_getoutput_same_file_system + + test_percentage = perfmon.get_percent_log_fs_usage(None) + self.assertTrue(math.isnan(test_percentage)) + + def test_get_percent_fs_usage(self, mock_getoutput): + """Assert that get_percent_fs_usage returns as expected""" + mock_getoutput.side_effect = self.mock_subprocess_getoutput + + expected_percentage = 0.13 + test_percentage = perfmon.get_percent_fs_usage(None) + self.assertAlmostEqual(test_percentage, expected_percentage, 7) + + +class TestGetMemUsage(unittest.TestCase): + '''Test get_percent_mem_usage ''' + + meminfo = '''MemTotal: 2580464 kB + MemFree: 1511024 kB + MemAvailable: 2210924 kB + Buffers: 95948 kB + Cached: 518164 kB + SwapCached: 0 kB + Active: 424468 kB + Inactive: 390016 kB + Active(anon): 207944 kB + Inactive(anon): 8740 kB + Active(file): 216524 kB + Inactive(file): 381276 kB + Unevictable: 13620 kB + Mlocked: 13620 kB + SwapTotal: 1048572 kB + SwapFree: 1048572 kB''' + @patch("builtins.open", new_callable=mock_open, read_data=meminfo) + def test_get_percent_mem_usage(self, _): + self.assertAlmostEqual(perfmon.get_percent_mem_usage([]), 0.17645198692948244) + + @patch('builtins.open', side_effect=Exception) + def test_get_percent_mem_usage_exception(self, _): + self.assertEqual(perfmon.get_percent_mem_usage(None), 0.0) + + +class TestGetPercentSRUsage(unittest.TestCase): + '''Test get_percent_sr_usage ''' + + def 
test_get_percent_sr_usage_correct_input(self):
+        input_list = [100, 200]
+        expected_result = 0.5
+        self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list),
+                               expected_result)
+
+    def test_get_percent_sr_usage_incorrect_input(self):
+        input_list = [100]  # Incorrect input, expecting two values
+        expected_result = 0.0
+        self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list),
+                               expected_result)
+
+    def test_get_percent_sr_usage_zero_division(self):
+        input_list = [0, 200]  # Physical utilization is 0
+        expected_result = 0.0
+        self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list),
+                               expected_result)
+
+    def test_get_percent_sr_usage_exception_handling(self):
+        input_list = ["invalid", 200]  # Invalid input, should raise an exception
+        expected_result = 0.0  # Since the exception is handled, the function returns 0.0
+        self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list),
+                               expected_result)
+
+
+class TestAverage(unittest.TestCase):
+    '''Test average'''
+    def test_average_empty_list(self):
+        result = perfmon.average([])
+        self.assertEqual(result, 0.0)
+
+    def test_average_single_element_list(self):
+        result = perfmon.average([5])
+        self.assertEqual(result, 5.0)
+
+    def test_average_positive_numbers(self):
+        result = perfmon.average([1, 2, 3, 4, 5])
+        self.assertEqual(result, 3.0)
+
+
+class TestUpdateAllXMLConfigs(unittest.TestCase):
+    '''Test update_all_xmlconfigs'''
+    def test_update_all_xmlconfigs(self):
+
+        perfmon.all_xmlconfigs = {}
+        perfmon.sruuids_by_hostuuid = {}
+
+        host_uuid = '28a574e4-bf57-4476-a83d-72cba7578d23'
+        vm_uuid = '2cf37285-57bc-4633-a24f-0c6c825dda66'
+        sr_uuid = '0e7f8fb3-1ba2-4bce-9889-48812273a316'
+        perfmon_config = '' \
+                         '' \
+                         '' \
+                         '' \
+                         '' \
+                         '' \
+                         '' \
+                         '' \
+                         '' \
+                         '' \
+                         '' \
+                         ''
+
+        mock_session = MagicMock()
+        mock_session.xenapi.host.get_all_records.return_value = {
+            'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a':{
+                'uuid': host_uuid,
+                'name_label': 'xrtuk-11-43',
+                'name_description': 'Default install',
+                'memory_overhead': '631816192',
+                'software_version': {
+                    'product_version': '8.4.0', 'product_version_text': '8',
+                    'product_version_text_short': '8', 'platform_name': 'XCP',
+                    'platform_version': '3.4.0', 'product_brand': 'XenServer',
+                    'build_number': 'stream', 'git_id': '0', 'hostname': 'localhost',
+                    'date': '20240229T15:07:05Z', 'dbv': '2024.0229',
+                    'is_preview_release': 'false', 'xapi': '24.11',
+                    'xapi_build': '24.11.0', 'xen': '4.17.3-4',
+                    'linux': '4.19.0+1', 'xencenter_min': '2.21',
+                    'xencenter_max': '2.21', 'network_backend': 'openvswitch',
+                    'db_schema': '5.775'},
+                'other_config': {
+                    'iscsi_iqn': 'iqn.2024-03.xenrtcloud:339cd227',
+                    'agent_start_time': '1710910331.',
+                    'boot_time': '1710910266.',
+                    'perfmon': perfmon_config}
+            }
+        }
+        mock_session.xenapi.VM.get_all_records.return_value = {
+            'OpaqueRef:fffc65bb-b909-03b2-c20a-8277434a4495': {
+                'uuid': vm_uuid,
+                'other_config': {
+                    'storage_driver_domain': 'OpaqueRef:11de3275-b5e4-a56c-a295',
+                    'is_system_domain': 'true', 'perfmon': perfmon_config
+                }
+            }
+        }
+        mock_session.xenapi.SR.get_all_records.return_value = {
+            'OpaqueRef:fffc65bb-b909-03b2-c20a-8277434a4495': {
+                'uuid': sr_uuid,
+                'other_config': {
+                    'storage_driver_domain': 'OpaqueRef:11de3275-b5e4-a56c-a295',
+                    'is_system_domain': 'true', 'perfmon': perfmon_config
+                },
+                'PBDs': ['pbd1', 'pbd2']
+            }
+        }
+        # One SR is connected to the same host through two PBDs
+        mock_session.xenapi.PBD.get_host.return_value = \
+            'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a'
+
+
+        # Call the function
to test + perfmon.update_all_xmlconfigs(mock_session) + + # Check that all_xmlconfigs and sruuids_by_hostuuid were updated correctly + expect_xmlconfigs = { + host_uuid: perfmon_config, + vm_uuid: perfmon_config, + sr_uuid: perfmon_config + } + self.assertEqual(perfmon.all_xmlconfigs, expect_xmlconfigs) + print(perfmon.sruuids_by_hostuuid) + self.assertEqual(perfmon.sruuids_by_hostuuid, {host_uuid: {sr_uuid}}) + +class TestObjectReport(unittest.TestCase): + '''Test Class ObjectReport ''' + def setUp(self): + # Create an instance of ObjectReport for testing + self.obj_report = perfmon.ObjectReport(objtype="vm", + uuid="e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e") + + def test_get_uuid(self): + self.assertEqual(self.obj_report.get_uuid(), + "e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e") + + def test_get_var_names(self): + # Initially, there are no variables, so the list should be empty + self.assertEqual(self.obj_report.get_var_names(), []) + + # Insert a variable and check if it appears in the list + self.obj_report.insert_value("cpu_usage", 0, 0.5) + self.assertEqual(self.obj_report.get_var_names(), ["cpu_usage"]) + + def test_get_value(self): + # Insert a value for a variable and retrieve it + self.obj_report.insert_value("cpu_usage", 0, 0.5) + self.assertEqual(self.obj_report.get_value("cpu_usage", 0), 0.5) + + # Trying to retrieve a value for a non-existing variable should return 0.0 + self.assertEqual(self.obj_report.get_value("memory_usage", 0), 0.0) + + def test_insert_value(self): + # Insert a value for a variable and check if it's stored correctly + self.obj_report.insert_value("cpu_usage", 0, 0.5) + self.assertEqual(self.obj_report.vars["cpu_usage"], [0.5]) + + # Insert another value for the same variable and check if it's stored correctly + self.obj_report.insert_value("cpu_usage", 1, 0.6) + self.assertEqual(self.obj_report.vars["cpu_usage"], [0.5, 0.6]) + + +@patch("perfmon.XapiSession") +@patch("perfmon.get_percent_fs_usage") +@patch("perfmon.get_percent_log_fs_usage") +@patch("perfmon.get_percent_mem_usage") +class TestVMMonitor(unittest.TestCase): + '''Test getting VM performance data from VMMonitor''' + + def test_process_rrd_updates(self, mock_get_percent_mem_usage, + mock_get_percent_log_fs_usage, + mock_get_percent_fs_usage, + mock_xapisession): + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + perfmon.all_xmlconfigs = {'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e': + ''' + + + + + + + + + + + + '''} + monitor = perfmon.VMMonitor(uuid) + rrd_updates = perfmon.RRDUpdates() + obj_report = perfmon.ObjectReport("vm", uuid) + obj_report.vars = { + 'cpu0': [0.0063071, 0.0048038, 0.0045862, 0.0048865, 0.0048923], + 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], + 'cpu2': [0.0088599, 0.0078701, 0.0058573, 0.0063993, 0.0056833], + 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], + 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 0.0050993], + 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], + 'cpu6': [0.006235, 0.0041764, 0.0048101, 0.0053798, 0.0050934], + 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], + 'memory': [2785000000.0, 2785000000.0, 2785000000.0, + 2785000000.0, 2785000000.0] + } + rrd_updates.report.obj_reports[uuid] = obj_report + rrd_updates.report.rows = 1 + session = mock_xapisession() + + mock_get_percent_fs_usage.return_value = 0.12 + mock_get_percent_mem_usage.return_value = 0.17380 + mock_get_percent_log_fs_usage.return_value = float("NaN") + monitor.process_rrd_updates(rrd_updates, session) + 
mock_get_percent_fs_usage.assert_called() + mock_get_percent_log_fs_usage.assert_called() + mock_get_percent_mem_usage.assert_called() + self.assertAlmostEqual(monitor.variables[0].value, 0.12) + self.assertAlmostEqual(monitor.variables[1].value, 0.17380) + self.assertTrue(math.isnan(monitor.variables[2].value)) + + +class TestHOSTMonitor(unittest.TestCase): + '''Test getting HOST performance data from HOSTMonitor''' + + @patch("perfmon.XapiSession") + def test_process_rrd_updates(self, mock_xapisession): + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + perfmon.all_xmlconfigs = {'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e': + ''' + '''} + monitor = perfmon.HOSTMonitor(uuid) + rrd_updates = perfmon.RRDUpdates() + obj_report = perfmon.ObjectReport("vm", uuid) + obj_report.vars = { + 'cpu0': [0.0063071, 0.0048038, 0.0045862, 0.0048865, 0.0048923], + 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], + 'cpu2': [0.0088599, 0.0078701, 0.0058573, 0.0063993, 0.0056833], + 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], + 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 0.0050993], + 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], + 'cpu6': [0.006235, 0.0041764, 0.0048101, 0.0053798, 0.0050934], + 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], + 'memory': [2785000000.0, 2785000000.0, 2785000000.0, + 2785000000.0, 2785000000.0] + } + rrd_updates.report.obj_reports[uuid] = obj_report + rrd_updates.report.rows = 5 + session = mock_xapisession() + + monitor.process_rrd_updates(rrd_updates, session) + # Average of cpu0-cpu7 (row 5) + # [0.0048923, 0.0053645, 0.0056833, 0.0048769, + # 0.0050993, 0.0062017, 0.0050934, 0.0049544] + self.assertAlmostEqual(monitor.variables[0].value, 0.005270725) + + def test_refresh_config(self): + perfmon.all_xmlconfigs = {} + perfmon.sruuids_by_hostuuid = {} + + host_uuid = '28a574e4-bf57-4476-a83d-72cba7578d23' + sr_uuid = '0e7f8fb3-1ba2-4bce-9889-48812273a316' + perfmon_config = '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' + + mock_session = MagicMock() + mock_session.xenapi.host.get_all_records.return_value = { + 'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a':{ + 'uuid': host_uuid, + 'other_config': { + 'iscsi_iqn': 'iqn.2024-03.xenrtcloud:339cd227', + 'agent_start_time': '1710910331.', + 'boot_time': '1710910266.', + 'perfmon': perfmon_config} + } + } + mock_session.xenapi.SR.get_all_records.return_value = { + 'OpaqueRef:fffc65bb-b909-03b2-c20a-8277434a4495': { + 'uuid': sr_uuid, + 'other_config': { + 'storage_driver_domain': 'OpaqueRef:11de3275-b5e4-a56c-a295', + 'is_system_domain': 'true', 'perfmon': perfmon_config + }, + 'PBDs': ['pbd1', 'pbd2'] + } + } + mock_session.xenapi.PBD.get_host.return_value = \ + 'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a' + perfmon.update_all_xmlconfigs(mock_session) + monitor = perfmon.HOSTMonitor(host_uuid) + monitor.refresh_config() + expected_sruuids = {sr_uuid} + self.assertEqual(set(monitor.secondary_xmlconfigs), expected_sruuids) + + +@patch("perfmon.XapiSession") +class TestSRMonitor(unittest.TestCase): + '''Test getting SR performance data from SrMonitor''' + def test_process_rrd_updates(self, mock_xapisession): + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + perfmon.all_xmlconfigs = {'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e': + ''' + '''} + monitor = perfmon.SRMonitor(uuid) + rrd_updates = perfmon.RRDUpdates() + obj_report = perfmon.ObjectReport("vm", uuid) + obj_report.vars = { + 'size': [100, 200, 300, 400, 500], + 
'physical_utilisation': [2000, 3000, 4000, 5000, 6000],
+        }
+        rrd_updates.report.obj_reports[uuid] = obj_report
+        rrd_updates.report.rows = 5
+        session = mock_xapisession()
+
+        monitor.process_rrd_updates(rrd_updates, session)
+        # get_percent_sr_usage([500, 6000])
+        self.assertAlmostEqual(monitor.variables[0].value, 0.08333333333333333)
+
+
+class TestRRDUpdates(unittest.TestCase):
+    '''Test class RRDUpdates and RRDContentHandler'''
+
+    @patch('time.time', return_value=100000)
+    def test_init(self, _):
+        rrd_updates = perfmon.RRDUpdates()
+
+        expected_start = 100000 - perfmon.interval
+        self.assertEqual(rrd_updates.params['start'], expected_start)
+        self.assertEqual(rrd_updates.params["host"], "true")
+        self.assertEqual(rrd_updates.params["sr_uuid"], "all")
+        self.assertEqual(rrd_updates.params["cf"], "AVERAGE")
+        self.assertEqual(rrd_updates.params["interval"], str(perfmon.rrd_step))
+
+
+    @patch('time.time', return_value=100000)
+    @patch("perfmon.XapiSession")
+    @patch('urllib.request.urlopen')
+    def test_refresh(self, mock_urlopen, mock_xapisession, _):
+        rrd_updates = perfmon.RRDUpdates()
+
+        # mock_session
+        mock_session = mock_xapisession()
+        mock_session.id.return_value = "mocked_session_id"
+
+        # mock xmlsource
+        xml = r'''<xport>
+            <meta>
+                <start>1213578000</start>
+                <step>3600</step>
+                <end>1213617600</end>
+                <rows>2</rows>
+                <columns>12</columns>
+                <legend>
+                    <entry>AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+                    <entry>AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+                    <entry>AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+                    <entry>MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+                    <entry>MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+                    <entry>MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+                    <entry>MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+                    <entry>MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+                    <entry>MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+                    <entry>LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+                    <entry>LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+                    <entry>LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+                </legend>
+            </meta>
+            <data>
+                <row><!-- The first row corresponds to the end time -->
+                    <t>1213617600</t>
+                    <v>0.0</v>
+                    <v>0.0282</v>
+                    <v>209715200.0000</v>
+                    <v>0.0</v>
+                    <v>0.0201</v>
+                    <v>209715200.0000</v>
+                    <v>0.0</v>
+                    <v>0.0445</v>
+                    <v>209715200.0000</v>
+                    <v>0.0</v>
+                    <v>0.0243</v>
+                    <v>209715200.0000</v>
+                </row>
+                <row><!-- The last row corresponds to the start time -->
+                    <t>1213616600</t>
+                    <v>0.0</v>
+                    <v>0.0282</v>
+                    <v>209715200.0000</v>
+                    <v>0.0</v>
+                    <v>0.0201</v>
+                    <v>209715200.0000</v>
+                    <v>0.0</v>
+                    <v>0.0445</v>
+                    <v>209715200.0000</v>
+                    <v>0.0</v>
+                    <v>0.0243</v>
+                    <v>209715200.0000</v>
+                </row>
+            </data>
+        </xport>'''
+        xml_rrdupdates = xml.encode(encoding='utf-8')
+        cm = MagicMock()
+        cm.read.return_value = xml_rrdupdates
+        cm.__enter__.return_value = cm
+        mock_urlopen.return_value = cm
+        rrd_updates.refresh(mock_session)
+
+        # Test __repr__
+        print(rrd_updates)
+
+        self.assertEqual(rrd_updates.get_num_rows(), 2)
+        self.assertIsNotNone(
+            rrd_updates.get_obj_report_by_uuid("ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3")
+        )
+        self.assertIsNone(
+            rrd_updates.get_obj_report_by_uuid("123345")
+        )
+        self.assertEqual(rrd_updates.get_uuid_list_by_objtype("vm"),
+                         ["ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3"])
+
+
+class TestVariable(unittest.TestCase):
+    '''Test class Variable'''
+
+    def test_set_active(self):
+        # Construct a variable node for VariableConfig
+        # Not used, just for input
+        xmlconfig = b'' \
+                    b''
+        xmldoc = perfmon.minidom.parseString(xmlconfig)
+        variable_nodes = xmldoc.getElementsByTagName("variable")
+        node = variable_nodes[0]
+
+        # Construct the alarm_create function and mock get_default_variable_config
+        # Not used, just for input
+        uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e'
+        monitor = perfmon.VMMonitor(uuid)
+        var = perfmon.Variable(node, monitor.alarm_create,
+                               monitor.get_default_variable_config)
+
+        # Call set_active with active=True
+        var.set_active(True)
+        self.assertTrue(var.active)
+
+        # Call set_active with active=False
+        var.set_active(False)
+        self.assertFalse(var.active)
+
+    @patch("perfmon.XapiSession")
+    def test_update(self, mock_xapisession):
+        xmlconfig = b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b'' \
+                    b''
+        xmldoc = perfmon.minidom.parseString(xmlconfig)
+        variable_nodes = xmldoc.getElementsByTagName("variable")
+        node = variable_nodes[0]
+
+        uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e'
+        monitor = perfmon.VMMonitor(uuid)
+        var = perfmon.Variable(node, monitor.alarm_create,
+                               monitor.get_default_variable_config)
+
+        session = mock_xapisession()
+
+        # Trigger the alarm
+        var.trigger_down_counter = 50
+        var.update(0.95, session)
+        self.assertEqual(var.trigger_down_counter, 60)
+
+        # Don't trigger the alarm - time isn't up yet
+        var.trigger_down_counter = 100
+        var.update(0.95, session)
+        self.assertEqual(var.trigger_down_counter, 40)
+
+        # Don't trigger the alarm - level is good
+        var.trigger_down_counter = 50
+        var.update(0.8, session)
+        self.assertEqual(var.trigger_down_counter, 60)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python3/tests/test_static_vdis.py b/python3/tests/test_static_vdis.py
new file mode 100644
index 00000000000..ef4e24d7f31
--- /dev/null
+++ b/python3/tests/test_static_vdis.py
@@ -0,0 +1,134 @@
+"""python3/tests/test_static_vdis.py: Test the static-vdis script"""
+
+import os
+import sys
+from pathlib import Path
+from types import ModuleType
+
+import pytest
+
+from python3.tests.import_helper import import_file_as_module, mocked_modules
+
+# ---------------------------- Test fixtures ---------------------------------
+
+
+@pytest.fixture(scope="function")  # function scope: Re-run for each test function
+def static_vdis() -> ModuleType:
+    """Test fixture to return the static-vdis module, mocked to avoid dependencies."""
+    with mocked_modules("XenAPI", "inventory"):
+        return import_file_as_module("python3/bin/static-vdis")
+
+
+# Hide pylint warnings for redefined-outer-name from using the static_vdis fixture:
+# pylint: disable=redefined-outer-name
+# Allow to access attributes of the static_vdis module from this test module:
+# pyright: reportAttributeAccessIssue=false
+
+# ----------------------------- Test cases -----------------------------------
+
+
+def test_whole_file(static_vdis: ModuleType, tmp_path):
+    """Test read_whole_file() and write_whole_file()"""
+
+    with open(__file__, encoding="utf-8") as data:
+        contents = data.read().strip()
+    assert static_vdis.read_whole_file(__file__) == contents
+    assert static_vdis.write_whole_file(tmp_path / "temp_file", contents) is None
+    with open(tmp_path / "temp_file", encoding="utf-8") as written_data:
+        assert written_data.read().strip() == contents
+
+
+def test_attach(static_vdis: ModuleType, tmpdir, mocker, capsys):
+    """Test seven common and SMAPIv1 code paths in the attach() function"""
+
+    # Path 1: When the VDI is not found, expect attach() to raise an exception:
+    static_vdis.list_vdis = lambda: [{"vdi-uuid": "existing-uuid"}]
+    with pytest.raises(Exception) as exc_info:
+        static_vdis.attach("nonexisting-uuid")
+    assert exc_info.value.args[0] == "Disk configuration not found"
+
+    # Path 2: When the VDI is already attached, expect attach() via main() to print "None\n":
+    static_vdis.list_vdis = lambda: [{"vdi-uuid": "attached", "path": "/attached"}]
+    sys.argv = ["static-vdis", "attach", "attached"]
+    static_vdis.main()
+    with capsys.disabled():
+        assert capsys.readouterr().out ==
"None\n" + + # Path 3: When the VDI is not attached, attach() to return "the-id/disk": + vdis: list[dict[str, str]] = [{"vdi-uuid": "attach-uuid", "id": "the-id"}] + static_vdis.list_vdis = lambda: vdis + static_vdis.call_backend_attach = lambda driver, config: "/mock-attached-path" + static_vdis.read_whole_file = lambda path: '{"json":true}' + disk = tmpdir.mkdir(vdis[0]["id"]).join("disk") + static_vdis.main_dir = str(tmpdir) + assert static_vdis.attach("attach-uuid") == disk + assert os.readlink(disk) == "/mock-attached-path" + os.unlink(disk) + + # Path 4: Create the disk file expect it to be deleted and replaced by a symlink: + disk.write("mock-disk-contents-to-delete") + assert static_vdis.attach("attach-uuid") == disk + assert os.readlink(disk) == "/mock-attached-path" + + # Path 5: When the backend call returns None, expect attach() to raise TypeError + static_vdis.call_backend_attach = lambda driver, config: None + with pytest.raises(TypeError) as exc_info: + static_vdis.attach("attach-uuid") + + # Path 6: When the backend returns an empty str, attach() raises FileNotFoundError: + static_vdis.call_backend_attach = lambda driver, config: "" + with pytest.raises(FileNotFoundError) as exc_info: + static_vdis.attach("attach-uuid") + + # Path 7: If the smapiv3_config exists, but not the volume plugin, attach() fails: + with pytest.raises(FileNotFoundError) as exc_info: + mocker.patch("os.path.exists", return_value=True) + static_vdis.MULTIPATH_FLAG = __file__ + static_vdis.attach("attach-uuid") + + +def test_fresh_name(static_vdis: ModuleType, tmp_path: Path): + """Test fresh_name() and list_vdis() - all code paths""" + + # When the freshly created tmp_path is empty, expect [] and "0": + static_vdis.main_dir = tmp_path.as_posix() + assert static_vdis.list_vdis() == [] + assert static_vdis.fresh_name() == "0" + + # When main_dir contains a directory with name "0", the next name should be "1": + os.mkdir(static_vdis.main_dir + "/0") + assert static_vdis.fresh_name() == "1" + + # When main_dir contains a directory with name "1", the next name should be "2": + os.mkdir(static_vdis.main_dir + "/1") + assert static_vdis.fresh_name() == "2" + + # When main_dir does not exist, an empty list and 0 should be returned: + static_vdis.main_dir = tmp_path.as_posix() + "/does-not-exist" + assert static_vdis.list_vdis() == [] + assert static_vdis.fresh_name() == "0" + + +def test_sr_attach(static_vdis: ModuleType, mocker): + """Test sr_attach()""" + + # We need to mock those as they would attempt to load the volume plugin and + # check the clusterstack, which are not available in the test environment: + static_vdis.call_volume_plugin = mocker.MagicMock() + static_vdis.check_clusterstack = mocker.MagicMock() + + # Set the return value of the mocked functions to success: + static_vdis.call_volume_plugin.return_value = "success" + static_vdis.check_clusterstack.return_value = "success" + + # Call the sr_attach function + device_config = {"key1": "value1", "key2": "value2"} + result = static_vdis.sr_attach("plugin_name", device_config) + + # Assert the expected behavior + assert result == "success" + static_vdis.call_volume_plugin.assert_called_once_with( + "plugin_name", + "SR.attach", + ["--configuration", "key1", "value1", "--configuration", "key2", "value2"], + ) diff --git a/python3/tests/test_usb_reset_mount.py b/python3/tests/test_usb_reset_mount.py new file mode 100644 index 00000000000..e9d432742f6 --- /dev/null +++ b/python3/tests/test_usb_reset_mount.py @@ -0,0 +1,14 @@ 
+"""scripts/unit_test/test_usb_reset_mount.py: Test usb_reset.mount and .umount""" +from __future__ import print_function + +from python3.tests.import_helper import import_file_as_module, mocked_modules + + +def test_usb_reset_mount_umount(private_mount_namespace): + """Test usb_reset.mount and .umount""" + assert private_mount_namespace + with mocked_modules("xcp", "xcp.logger"): + usb_reset = import_file_as_module("python3/libexec/usb_reset.py") + usb_reset.log.error = print + usb_reset.mount(source="tmpfs", target="/tmp", fs="tmpfs") + usb_reset.umount("/tmp") diff --git a/scripts/test_usb_scan.py b/python3/tests/test_usb_scan.py similarity index 50% rename from scripts/test_usb_scan.py rename to python3/tests/test_usb_scan.py index c64d89d8276..8b886194c74 100644 --- a/scripts/test_usb_scan.py +++ b/python3/tests/test_usb_scan.py @@ -1,36 +1,33 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # unittest for usb_scan.py -try: - from collections.abc import Mapping, Container, Iterable -except ImportError: # python2 - from collections import Mapping, Container, Iterable -import mock import os import shutil import sys import tempfile import unittest +from collections.abc import Mapping +from typing import cast -def nottest(obj): - obj.__test__ = False - return obj +import mock +from python3.tests.import_helper import import_file_as_module +# mock modules to avoid dependencies sys.modules["xcp"] = mock.Mock() sys.modules["xcp.logger"] = mock.Mock() sys.modules["pyudev"] = mock.Mock() +usb_scan = import_file_as_module("python3/libexec/usb_scan.py") class MocDeviceAttrs(Mapping): def __init__(self, device): self.d = device.get_attr() - def __iter__(self): - for name in self.d: - yield name + def __iter__(self): # pragma: no cover + yield from self.d - def __len__(self): + def __len__(self): # pragma: no cover return len(self.d) def __getitem__(self, name): @@ -38,7 +35,6 @@ def __getitem__(self, name): class MocDevice(Mapping): - def __init__(self, d): self.d = d @@ -56,19 +52,17 @@ def get_attr(self): def attributes(self): return MocDeviceAttrs(self) - def __iter__(self): - for name in self.get_prop(): - yield name + def __iter__(self): # pragma: no cover + yield from self.get_prop() - def __len__(self): + def __len__(self): # pragma: no cover return len(self.get_prop()) def __getitem__(self, name): return self.get_prop().get(name) - -class MocEnumerator(object): - +# pylint: disable=too-few-public-methods +class MocEnumerator(): def __init__(self, ds): self.ds = ds @@ -76,96 +70,100 @@ def __iter__(self): for d in self.ds: yield MocDevice(d) - -class MocContext(object): - +# pylint: disable=too-few-public-methods +class MocContext(): def __init__(self, devices, interfaces): self.devices = devices self.interfaces = interfaces def list_devices(self, **kwargs): - if "usb" == kwargs.pop("subsystem"): - dev_type = kwargs.pop("DEVTYPE") - if "usb_device" == dev_type: - return MocEnumerator(self.devices) - elif "usb_interface" == dev_type: - return MocEnumerator(self.interfaces) - return MocEnumerator([]) + assert kwargs.pop("subsystem") == "usb" + dev_type = kwargs.pop("DEVTYPE") + if dev_type == "usb_device": + return MocEnumerator(self.devices) + elif dev_type == "usb_interface": + return MocEnumerator(self.interfaces) + raise AssertionError(f"unexpected {dev_type}") # pragma: no cover def mock_setup(mod, devices, interfaces, path): - mod.log.error = test_log - mod.log.debug = test_log + mod.log.error = verify_log + mod.log.debug = verify_log mod.Policy._PATH = path - mod.pyudev.Context = 
mock.Mock(return_value=MocContext( - devices, interfaces)) + mod.pyudev.Context = mock.Mock( + return_value=MocContext(devices, interfaces)) -@nottest -def test_log(m): +def verify_log(_): pass class TestUsbScan(unittest.TestCase): - def setUp(self): - try: - self.work_dir = tempfile.mkdtemp(prefix="test_usb_scan") - except: - raise + self.work_dir = tempfile.mkdtemp(prefix="test_usb_scan") def tearDown(self): shutil.rmtree(self.work_dir, ignore_errors=True) - @nottest - def test_usb_common(self, moc_devices, moc_interfaces, moc_results, - path="./scripts/usb-policy.conf"): - import usb_scan + def verify_usb_common( + self, moc_devices, + moc_interfaces, + moc_results, + # Use relative path to allow tests to be started in subdirectories + path = os.path.dirname(__file__) + "/../../scripts/usb-policy.conf" + ): + mock_setup(usb_scan, moc_devices, moc_interfaces, path) devices, interfaces = usb_scan.get_usb_info() + usb_scan.log_list(devices) + usb_scan.log_list(interfaces) + pusbs = usb_scan.make_pusbs_list(devices, interfaces) # pass pusbs in json to XAPI self.assertEqual(sorted(pusbs), sorted(moc_results)) - @nottest - def test_usb_exit(self, devices, interfaces, results, - path="./scripts/usb-policy.conf", msg=""): + def verify_usb_exit( + self, devices, interfaces, results, + path="./scripts/usb-policy.conf", + msg="" + ): with self.assertRaises(SystemExit) as cm: - self.test_usb_common(devices, interfaces, results, path) + self.verify_usb_common(devices, interfaces, results, path) if msg: - self.assertIn(msg, cm.exception.code) + # cm.exception.code is int type whose format + # looks like "duplicated tag'vid' found, + # malformed line ALLOW:vid=056a vid=0314 class=03" + self.assertIn(msg, cast(str, cm.exception.code)) # code is a str def test_usb_dongle(self): devices = [ { "name": "1-2", - "props": { - "ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc." - }, + "props": {"ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc."}, "attrs": { - "idVendor": "096e", - "bNumInterfaces": " 1", - "bConfigurationValue": "1", - "bcdDevice": "010a", - "version": " 1.10", - "idProduct": "0302", - "bDeviceClass": "00", - "speed": "480" - } + "idVendor": b"096e", + "bNumInterfaces": b" 1", + "bConfigurationValue": b"1", + "bcdDevice": b"010a", + "version": b" 1.10", + "idProduct": b"0302", + "bDeviceClass": b"00", + "speed": b"480", + }, } ] interfaces = [ { "name": "1-2:1.0", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "00", - "bInterfaceProtocol": "00", - "bInterfaceNumber": "00", - } + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"00", + "bInterfaceProtocol": b"00", + "bInterfaceNumber": b"00", + }, } ] results = [ @@ -178,39 +176,37 @@ def test_usb_dongle(self): "vendor-id": "096e", "path": "1-2", "serial": "", - "speed": "480" + "speed": "480", } ] - self.test_usb_common(devices, interfaces, results) + self.verify_usb_common(devices, interfaces, results) def test_usb_dongle_on_hub(self): devices = [ { "name": "1-2.1", - "props": { - "ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc." 
- }, + "props": {"ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc."}, "attrs": { - "idVendor": "096e", - "bNumInterfaces": " 1", - "bConfigurationValue": "1", - "bcdDevice": "010a", - "version": " 1.10", - "idProduct": "0302", - "bDeviceClass": "00", - "speed": "12" - } + "idVendor": b"096e", + "bNumInterfaces": b" 1", + "bConfigurationValue": b"1", + "bcdDevice": b"010a", + "version": b" 1.10", + "idProduct": b"0302", + "bDeviceClass": b"00", + "speed": b"12", + }, } ] interfaces = [ { "name": "1-2.1:1.0", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "00", - "bInterfaceProtocol": "00", - "bInterfaceNumber": "00", - } + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"00", + "bInterfaceProtocol": b"00", + "bInterfaceNumber": b"00", + }, } ] results = [ @@ -223,153 +219,144 @@ def test_usb_dongle_on_hub(self): "vendor-id": "096e", "path": "1-2.1", "serial": "", - "speed": "12" + "speed": "12", } ] - self.test_usb_common(devices, interfaces, results) + self.verify_usb_common(devices, interfaces, results) def test_usb_dongle_unbinded(self): devices = [ { "name": "1-2", - "props": { - "ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc." - }, + "props": {"ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc."}, "attrs": { - "idVendor": "096e", - "bNumInterfaces": "", - "bConfigurationValue": "", - "bcdDevice": "010a", - "version": " 1.10", - "idProduct": "0302", - "bDeviceClass": "00", - } + "idVendor": b"096e", + "bNumInterfaces": b"", + "bConfigurationValue": b"", + "bcdDevice": b"010a", + "version": b" 1.10", + "idProduct": b"0302", + "bDeviceClass": b"00", + }, } ] - interfaces = [ - ] - results = [ - ] - self.test_usb_common(devices, interfaces, results) + interfaces = [] + results = [] + self.verify_usb_common(devices, interfaces, results) def test_usb_keyboard(self): devices = [ { "name": "1-2", - "props": { - "ID_VENDOR_FROM_DATABASE": "Dell Computer Corp." 
- }, + "props": {"ID_VENDOR_FROM_DATABASE": "Dell Computer Corp."}, "attrs": { - "idVendor": "413c", - "bNumInterfaces": " 2", - "bConfigurationValue": "1", - "bcdDevice": "0110", - "version": " 2.00", - "idProduct": "2113", - "bDeviceClass": "00", - } + "idVendor": b"413c", + "bNumInterfaces": b" 2", + "bConfigurationValue": b"1", + "bcdDevice": b"0110", + "version": b" 2.00", + "idProduct": b"2113", + "bDeviceClass": b"00", + }, } ] interfaces = [ { "name": "1-2:1.0", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "01", - "bInterfaceProtocol": "01", - "bInterfaceNumber": "00", - } + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"01", + "bInterfaceProtocol": b"01", + "bInterfaceNumber": b"00", + }, }, { "name": "1-2:1.1", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "00", - "bInterfaceProtocol": "00", - "bInterfaceNumber": "01", - } - } - ] - results = [ + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"00", + "bInterfaceProtocol": b"00", + "bInterfaceNumber": b"01", + }, + }, ] - self.test_usb_common(devices, interfaces, results) + results = [] + self.verify_usb_common(devices, interfaces, results) def test_usb_config_missing(self): - self.test_usb_exit([], [], [], "not_exist.conf") + self.verify_usb_exit([], [], [], "not_exist.conf") - @nottest - def test_usb_config_error_common(self, content, msg): + def verify_usb_config_error_common(self, content, msg): path = os.path.join(self.work_dir, "usb-policy.conf") with open(path, "w") as f: f.write(content) - self.test_usb_exit([], [], [], path, msg) + self.verify_usb_exit([], [], [], path, msg) def test_usb_config_error_unexpected_chars_with_comment(self): content = """ss# unexpected words with comment ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, - "to unpack") + self.verify_usb_config_error_common(content, "to unpack") def test_usb_config_error_duplicated_key(self): content = """# duplicated key word ALLOW:vid=056a vid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "duplicated tag") + self.verify_usb_config_error_common(content, "duplicated tag") def test_usb_config_error_invalid_key(self): content = """# invalid key word ALLOW:vid=056a psid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed policy rule, " - "unable to parse") + self.verify_usb_config_error_common( + content, "Malformed policy rule, unable to parse") def test_usb_config_error_hex_length_4(self): content = """# hex length not 4 ALLOW:vid=056a pid=031 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "length error") + self.verify_usb_config_error_common(content, "length error") def test_usb_config_error_hex_length_2(self): content = """# hex length not 2 DENY:vid=056a pid=0314 class=035 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "length error") + self.verify_usb_config_error_common(content, "length error") def test_usb_config_error_action_key(self): content = """# wrong action key word ALLOWED:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed action") + self.verify_usb_config_error_common(content, "Malformed action") def 
test_usb_config_error_unexpected_chars_end(self): content = """# unexpected words in the end ALLOW:vid=056a pid=0314 class=03 kk # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed policy rule, " - "unable to parse") + self.verify_usb_config_error_common( + content, "Malformed policy rule, unable to parse") def test_usb_config_error_unexpected_chars_beg(self): content = """# unexpected words at the beginning ii ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed action") + self.verify_usb_config_error_common(content, "Malformed action") def test_usb_config_error_unexpected_chars_mid(self): content = """# unexpected words in the middle ALLOW:vid=056a pid=0314 jj class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed policy rule, " - "unable to parse") + self.verify_usb_config_error_common( + content, "Malformed policy rule, unable to parse") def test_usb_config_error_unexpected_non_empty_line(self): content = """# unexpected non empty line @@ -377,13 +364,11 @@ def test_usb_config_error_unexpected_non_empty_line(self): aa ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, - "to unpack") + self.verify_usb_config_error_common(content, "to unpack") def test_usb_config_error_missing_colon(self): content = """# missing colon after action ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW # Otherwise allow everything else """ - self.test_usb_config_error_common(content, - "to unpack") + self.verify_usb_config_error_common(content, "to unpack") diff --git a/pytype_reporter.py b/pytype_reporter.py index 877dc29c9d8..b94ed948786 100755 --- a/pytype_reporter.py +++ b/pytype_reporter.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """GitHub action workflow Runner for pytype which works also locally without GitHub""" import json import re @@ -599,10 +599,11 @@ def main(): config_file = "pyproject.toml" config = load_config(config_file, basename(__file__)) config.setdefault("expected_to_fail", []) - debug("Expected to fail: %s", ", ".join(config["expected_to_fail"])) changed_but_in_expected_to_fail = [] - if config["expected_to_fail"] != []: + if config["expected_to_fail"]: + debug("Expected to fail: %s", ", ".join(config["expected_to_fail"])) + changed_but_in_expected_to_fail = git_diff( "--name-only", find_branch_point(config), diff --git a/quality-gate.sh b/quality-gate.sh index 4c3c1da3a3a..8e5a6ce8c26 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=511 + N=510 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) @@ -40,7 +40,7 @@ mli-files () { } structural-equality () { - N=10 + N=11 EQ=$(git grep -r --count ' == ' -- '**/*.ml' ':!ocaml/sdk-gen/**/*.ml' | cut -d ':' -f 2 | paste -sd+ - | bc) if [ "$EQ" -eq "$N" ]; then echo "OK counted $EQ usages of ' == '" diff --git a/scripts/Makefile b/scripts/Makefile index b47c36f5358..4c04da3943c 100644 --- a/scripts/Makefile 
+++ b/scripts/Makefile @@ -1,8 +1,5 @@ include ../config.mk -SITE_DIR=$(shell python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") - IPROG=./install.sh 755 IDATA=./install.sh 644 @@ -22,15 +19,12 @@ install: mkdir -p $(DESTDIR)/usr/lib/systemd/system mkdir -p $(DESTDIR)/usr/lib/yum-plugins mkdir -p $(DESTDIR)$(OPTDIR)/packages/post-install-scripts - mkdir -p $(DESTDIR)$(SITE_DIR) - mkdir -p $(DESTDIR)$(SITE3_DIR) mkdir -p $(DESTDIR)/etc/systemd/system/stunnel@xapi.service.d/ $(IPROG) base-path $(DESTDIR)/etc/xapi.d $(IPROG) sm_diagnostics $(DESTDIR)$(LIBEXECDIR) $(IPROG) xn_diagnostics $(DESTDIR)$(LIBEXECDIR) $(IPROG) thread_diagnostics $(DESTDIR)$(LIBEXECDIR) $(IPROG) list_plugins $(DESTDIR)$(LIBEXECDIR) - $(IPROG) nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(ETCXENDIR)/bugtool/xapi mkdir -p $(DESTDIR)$(ETCXENDIR)/bugtool/xenopsd mkdir -p $(DESTDIR)$(ETCXENDIR)/bugtool/observer @@ -43,7 +37,6 @@ install: $(IPROG) fence $(DESTDIR)$(LIBEXECDIR) $(IPROG) xha-lc $(DESTDIR)$(LIBEXECDIR) $(IPROG) xapi-health-check $(DESTDIR)$(LIBEXECDIR) - $(IPROG) mail-alarm $(DESTDIR)$(LIBEXECDIR) $(IDATA) audit-logrotate $(DESTDIR)/etc/logrotate.d/audit $(IDATA) xapi-logrotate.conf $(DESTDIR)/etc/logrotate.d/xapi $(IPROG) xapi-tracing-log-trim.sh $(DESTDIR)$(LIBEXECDIR) @@ -63,7 +56,6 @@ install: $(IDATA) cdrommon@.service $(DESTDIR)/usr/lib/systemd/system/cdrommon@.service $(IDATA) gencert.service $(DESTDIR)/usr/lib/systemd/system/gencert.service $(IDATA) xapi-domains.service $(DESTDIR)/usr/lib/systemd/system/xapi-domains.service - $(IDATA) perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service $(IDATA) generate-iscsi-iqn.service $(DESTDIR)/usr/lib/systemd/system/generate-iscsi-iqn.service $(IDATA) xapi.service $(DESTDIR)/usr/lib/systemd/system/xapi.service $(IDATA) attach-static-vdis.service $(DESTDIR)/usr/lib/systemd/system/attach-static-vdis.service @@ -91,15 +83,12 @@ install: $(IPROG) update-ca-bundle.sh $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(OPTDIR)/debug $(IPROG) debug_ha_query_liveset $(DESTDIR)$(OPTDIR)/debug - $(IPROG) xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-mount-iso-sr $(DESTDIR)$(OPTDIR)/bin - $(IPROG) xe-reset-networking $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-toolstack-restart $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-xentrace $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-edit-bootloader $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-get-network-backend $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-enable-all-plugin-metrics $(DESTDIR)$(OPTDIR)/bin - $(IPROG) static-vdis $(DESTDIR)$(OPTDIR)/bin $(IPROG) with-vdi $(DESTDIR)$(OPTDIR)/debug $(IPROG) import-update-key $(DESTDIR)$(OPTDIR)/debug $(IPROG) pool.conf $(DESTDIR)$(ETCXENDIR) @@ -107,37 +96,18 @@ install: $(IPROG) pam.d-xapi $(DESTDIR)/etc/pam.d/xapi $(IPROG) upload-wrapper logs-download $(DESTDIR)$(LIBEXECDIR) $(IDATA) usb-policy.conf $(DESTDIR)$(ETCXENDIR) - $(IPROG) usb_reset.py $(DESTDIR)$(LIBEXECDIR) - $(IPROG) usb_scan.py $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(OPTDIR)/packages/iso #omg XXX $(IPROG) xapi-rolling-upgrade-miami $(DESTDIR)$(LIBEXECDIR)/xapi-rolling-upgrade $(IPROG) set-hostname $(DESTDIR)$(LIBEXECDIR) $(IPROG) update-mh-info $(DESTDIR)$(LIBEXECDIR) $(IPROG) host-bugreport-upload $(DESTDIR)$(LIBEXECDIR)/host-bugreport-upload - $(IPROG) host-display $(DESTDIR)$(LIBEXECDIR) $(IPROG) xe-backup-metadata $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-restore-metadata 
$(DESTDIR)$(OPTDIR)/bin - $(IPROG) link-vms-by-sr.py $(DESTDIR)$(LIBEXECDIR) - $(IPROG) print-custom-templates $(DESTDIR)$(LIBEXECDIR) - $(IPROG) backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) - $(IPROG) restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) - mkdir -p $(DESTDIR)/etc/sysconfig - $(IPROG) sysconfig-perfmon $(DESTDIR)/etc/sysconfig/perfmon - $(IPROG) perfmon $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(EXTENSIONDIR) - $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) - $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) - $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) mkdir -p $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/perfmon $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/firewall-port $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/openvswitch-config-update $(DESTDIR)$(PLUGINDIR) mkdir -p $(DESTDIR)$(HOOKSDIR)/host-post-declare-dead $(IPROG) 10resetvdis $(DESTDIR)$(HOOKSDIR)/host-post-declare-dead mkdir -p $(DESTDIR)/etc/bash_completion.d @@ -146,7 +116,6 @@ install: $(IPROG) xe-syslog-reconfigure $(DESTDIR)$(LIBEXECDIR) $(IPROG) xe-install-supplemental-pack $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-enable-ipv6 $(DESTDIR)$(OPTDIR)/bin - $(IPROG) hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) pv2hvm $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)/etc/cron.daily mkdir -p $(DESTDIR)/etc/cron.hourly @@ -156,29 +125,9 @@ install: mkdir -p $(DESTDIR)/etc/cron.d $(IDATA) xapi-tracing-log-trim.cron $(DESTDIR)/etc/cron.d/xapi-tracing-log-trim.cron mkdir -p $(DESTDIR)/opt/xensource/gpg -# templates - $(IPROG) templates/debian $(DESTDIR)$(OPTDIR)/packages/post-install-scripts/debian-etch - $(IPROG) templates/debug $(DESTDIR)$(OPTDIR)/packages/post-install-scripts # host-backup-restore $(IPROG) host-backup-restore/host-backup $(DESTDIR)$(LIBEXECDIR) $(IPROG) host-backup-restore/host-restore $(DESTDIR)$(LIBEXECDIR) -# example/python -ifneq ($(BUILD_PY2), NO) - $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE_DIR)/ - $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE_DIR)/ - $(IDATA) examples/python/inventory.py $(DESTDIR)$(SITE_DIR)/ -endif - $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE3_DIR)/ - sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py - $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ - $(IDATA) examples/python/inventory.py $(DESTDIR)$(SITE3_DIR)/ - $(IPROG) examples/python/echo.py $(DESTDIR)$(PLUGINDIR)/echo - $(IPROG) examples/python/shell.py $(DESTDIR)$(LIBEXECDIR)/shell.py -# poweron - $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py - $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan - $(IPROG) poweron/IPMI.py $(DESTDIR)$(PLUGINDIR)/IPMI.py - $(IPROG) poweron/power-on.py $(DESTDIR)$(PLUGINDIR)/power-on-host # YUM plugins $(IPROG) yum-plugins/accesstoken.py $(DESTDIR)$(YUMPLUGINDIR) $(IDATA) yum-plugins/accesstoken.conf $(DESTDIR)$(YUMPLUGINCONFDIR) @@ -191,4 +140,3 @@ endif $(IDATA) mail-languages/ja-JP.json $(DESTDIR)/etc/xapi.d/mail-languages # uefi mkdir -p $(DESTDIR)/etc/xapi.d/efi-clone - diff --git a/scripts/examples/python/exportimport.py b/scripts/examples/python/exportimport.py deleted file mode 100755 index 
bc72580659b..00000000000 --- a/scripts/examples/python/exportimport.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2014 Citrix, Inc. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Demonstrate how to -# - export raw disk images -# - import raw disk images -# - connect an export to an import to copy a raw disk image - -from __future__ import print_function -import sys, os, socket, urllib.request, urllib.error, urllib.parse, XenAPI, traceback, ssl, time - -def exportimport(url, xapi, session, src_vdi, dst_vdi): - # If an HTTP operation fails then it will record the error on the task - # object. Note you can't use the HTTP response code for this because - # it must be sent *before* the stream is processed. - import_task = xapi.xenapi.task.create("import " + dst_vdi, "") - export_task = xapi.xenapi.task.create("export " + src_vdi, "") - try: - # an HTTP GET of this will export a disk: - get_url = "/export_raw_vdi?session_id=%s&vdi=%s&task_id=%s" % (session, src_vdi, export_task) - # an HTTP PUT to this will import a disk: - put_url = "/import_raw_vdi?session_id=%s&vdi=%s&task_id=%s" % (session, dst_vdi, import_task) - - # 'data' is the stream of raw data: - data = urllib.request.urlopen(url + get_url) - - # python's builtin library doesn't support HTTP PUT very well - # so we do it manually. Note xapi doesn't support Transfer-encoding: - # chunked so we must send the data raw. - url = urllib.parse.urlparse(url) - host = url.netloc.split(":")[0] # assume port 443 - if url.scheme != "https": - print("Sorry, this example only supports HTTPS (not HTTP)", file=sys.stderr) - print("Plaintext HTTP has the following problems:", file=sys.stderr) - print(" - the data can be captured by other programs on the network", file=sys.stderr) - print(" - some network middleboxes will mangle the data", file=sys.stderr) - # time wasted debugging a problem caused by a middlebox: 3hrs - # Just use HTTPS! 
- return - - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - output = ssl.wrap_socket(s) - output.connect((host, 443)) - - # HTTP/1.0 with no transfer-encoding - headers = [ - "PUT %s HTTP/1.0" % put_url, - "Connection:close", - "" - ] - print("Sending HTTP request:") - for h in headers: - output.send("%s\r\n" % h) - print("%s\r\n" % h) - result = output.recv(1024) - print("Received HTTP response:") - print(result) - if "200 OK" not in result: - print("Expected an HTTP 200, got %s" % result, file=sys.stderr) - return - - # Copy the raw bytes, signal completion by closing the socket - virtual_size = long(xapi.xenapi.VDI.get_virtual_size(src_vdi)) - print("Copying %Ld bytes" % virtual_size) - left = virtual_size - while left > 0: - block = data.read(min(65536, left)) - if block is None: - break - output.send(block) - left = left - len(block) - output.close() - - # Wait for the tasks to complete and check whether they both - # succeeded. It takes a few seconds to detach the disk etc. - finished = False - while not finished: - import_status = xapi.xenapi.task.get_status(import_task) - export_status = xapi.xenapi.task.get_status(export_task) - finished = import_status != "pending" and export_task != "pending" - time.sleep(1) - if import_status == "success" and export_status == "success": - print("OK") - else: - print("FAILED") - if import_status != "success": - print("The import task failed with: ", " ".join(xapi.xenapi.task.get_error_info(import_task))) - if export_status != "success": - print("The export task failed with: ", " ".join(xapi.xenapi.task.get_error_info(export_task))) - - finally: - # The task creator has to destroy them at the end: - xapi.xenapi.task.destroy(import_task) - xapi.xenapi.task.destroy(export_task) - -if __name__ == "__main__": - if len(sys.argv) != 5: - print("Usage:") - print(sys.argv[0], " ") - print(" -- creates a fresh VDI and streams the contents of into it.") - print() - print("Example:") - print("SR=$(xe pool-list params=default-SR --minimal)") - print("VDI=$(xe vdi-create sr-uuid=$SR name-label=test virtual-size=128MiB type=user)") - print(sys.argv[0], "https://localhost password $VDI") - sys.exit(1) - url = sys.argv[1] - username = sys.argv[2] - password = sys.argv[3] - vdi_uuid = sys.argv[4] - # First acquire a valid session by logging in: - xapi = XenAPI.Session(url) - xapi.xenapi.login_with_password(username, password, '1.0', 'xen-api-scripts-exportimport.py') - dst_vdi = None - try: - src_vdi = xapi.xenapi.VDI.get_by_uuid(vdi_uuid) - sr = xapi.xenapi.VDI.get_SR(src_vdi) - # Create an empty VDI with the same initial parameters (e.g. 
size) - # to upload into - vdi_args = xapi.xenapi.VDI.get_record(src_vdi) - dst_vdi = xapi.xenapi.VDI.create(vdi_args) - exportimport(url, xapi, xapi._session, src_vdi, dst_vdi) - except Exception as e: - print("Caught %s: trying to clean up" % str(e)) - traceback.print_exc() - if dst_vdi: - xapi.xenapi.VDI.destroy(dst_vdi) - finally: - xapi.xenapi.logout() diff --git a/scripts/examples/python/inventory.py b/scripts/examples/python/inventory.py deleted file mode 100644 index 9fd645b5d32..00000000000 --- a/scripts/examples/python/inventory.py +++ /dev/null @@ -1,32 +0,0 @@ -# Simple functions to read the constants from the xensource-inventory file - -INVENTORY="@INVENTORY@" -INSTALLATION_UUID="INSTALLATION_UUID" - - -def read_kvpairs(filename): - """Read in a file of key-value pairs in the format used by the inventory file""" - f = open(filename) - all_entries = {} - try: - for line in f.readlines(): - equals = line.index("=") - key = line[0:equals] - value = line[equals+1:].strip().strip("'") - all_entries[key] = value - finally: - f.close() - return all_entries - - -def parse(): - """Return the contents of the xensource inventory file as a dictionary""" - try: - return read_kvpairs(INVENTORY) - except: - return {} - - -def get_localhost_uuid(): - """Return the UUID of the local host""" - return parse()[INSTALLATION_UUID] diff --git a/scripts/examples/python/lvhd-api-test.py b/scripts/examples/python/lvhd-api-test.py deleted file mode 100644 index 4b7786d3f27..00000000000 --- a/scripts/examples/python/lvhd-api-test.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import print_function -import XenAPI, sys - -def go(x, name): - vm = x.xenapi.VM.get_by_name_label(name)[0] - vbds = x.xenapi.VM.get_VBDs(vm) - non_empty = filter(lambda y:not(x.xenapi.VBD.get_empty(y)), vbds) - vdis = map(lambda y:x.xenapi.VBD.get_VDI(y), non_empty) - - print("Calling API call on %s" % (repr(vdis))) - result = x.xenapi.SR.lvhd_stop_using_these_vdis_and_call_script(vdis, "echo", "main", { "hello": "there", "sleep": "10" }) - print(repr(result)) - - -if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage:", file=sys.stderr) - print(" %s " % (sys.argv[0]), file=sys.stderr) - print(" -- Call SR.lvhd_stop_using_these_vdis_and_call_script with all VDIs with VBDs (attached or not) linking to specified VM", file=sys.stderr) - sys.exit(1) - name = sys.argv[1] - x = XenAPI.xapi_local() - x.xenapi.login_with_password("root", "", "1.0", "xen-api-scripts-lvhd-api-test.py") - try: - go(x, name) - finally: - x.xenapi.logout() diff --git a/scripts/examples/python/mini-xenrt.py b/scripts/examples/python/mini-xenrt.py deleted file mode 100644 index 0907132da80..00000000000 --- a/scripts/examples/python/mini-xenrt.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/env python3 - -# Receive multiple VMs -# Issue parallel loops of: reboot, suspend/resume, migrate - -from __future__ import print_function -import xmlrpc.client -from threading import Thread -import time, sys - -iso8601 = "%Y%m%dT%H:%M:%SZ" - -stop_on_first_failure = True -stop = False - -class Operation: - def __init__(self): - raise NotImplementedError - def execute(self, server, session_id): - raise NotImplementedError - -class Reboot(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, server, session_id): - return server.VM.clean_reboot(session_id, self.vm) - def __str__(self): - return "clean_reboot(%s)" % self.vm - -class SuspendResume(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, 
server, session_id): - x = { "ErrorDescription": [ "VM_MISSING_PV_DRIVERS" ] } - while "ErrorDescription" in x and x["ErrorDescription"][0] == "VM_MISSING_PV_DRIVERS": - x = server.VM.suspend(session_id, self.vm) - if "ErrorDescription" in x: - time.sleep(1) - if x["Status"] != "Success": - return x - return server.VM.resume(session_id, self.vm, False, False) - def __str__(self): - return "suspendresume(%s)" % self.vm - -class ShutdownStart(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, server, session_id): - x = server.VM.clean_shutdown(session_id, self.vm) - if x["Status"] != "Success": - return x - return server.VM.start(session_id, self.vm, False, False) - #return { "Status": "bad", "ErrorDescription": "foo" } - def __str__(self): - return "shutdownstart(%s)" % self.vm - -class LocalhostMigrate(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, server, session_id): - return server.VM.pool_migrate(session_id, self.vm, server.VM.get_resident_on(session_id, self.vm)["Value"], { "live": "true" } ) - def __str__(self): - return "localhostmigrate(%s)" % self.vm - -# Use this to give each thread a different ID -worker_count = 0 - -class Worker(Thread): - def __init__(self, server, session_id, operations): - Thread.__init__(self) - self.server = server - self.session_id = session_id - self.operations = operations - self.num_successes = 0 - self.num_failures = 0 - global worker_count - self.id = worker_count - worker_count = worker_count + 1 - def run(self): - global iso8601 - global stop_on_first_failure, stop - for op in self.operations: - description = str(op) - - if stop: - return - - start = time.strftime(iso8601, time.gmtime(time.time ())) - result = op.execute(self.server, self.session_id) - end = time.strftime(iso8601, time.gmtime(time.time ())) - - if result["Status"] == "Success": - print("SUCCESS %d %s %s %s" % (self.id, start, end, description)) - self.num_successes = self.num_successes + 1 - else: - error_descr = result["ErrorDescription"] - print("FAILURE %d %s %s %s %s" % (self.id, start, end, error_descr[0], description)) - self.num_failures = self.num_failures + 1 - if stop_on_first_failure: - stop = True - -def make_operation_list(vm): - return [ Reboot(vm), SuspendResume(vm), LocalhostMigrate(vm) ] * 100 - -if __name__ == "__main__": - if len(sys.argv) != 3: - print("Usage:") - print(" %s " % (sys.argv[0])) - print(" -- performs parallel operations on VMs with the specified other-config key") - sys.exit(1) - - x = xmlrpc.client.server(sys.argv[1]) - key = sys.argv[2] - session = x.session.login_with_password("root", "xenroot", "1.0", "xen-api-scripts-minixenrt.py")["Value"] - vms = x.VM.get_all_records(session)["Value"] - - workers = [] - for vm in vms.keys(): - if key in vms[vm]["other_config"]: - allowed_ops = vms[vm]["allowed_operations"] - for op in [ "clean_reboot", "suspend", "pool_migrate" ]: - if op not in allowed_ops: - raise RuntimeError("VM %s is not in a state where it can %s" % (vms[vm]["name_label"], op)) - workers.append(Worker(x, session, make_operation_list(vm))) - for w in workers: - w.start() - for w in workers: - w.join() - successes = 0 - failures = 0 - for w in workers: - successes = successes + w.num_successes - failures = failures + w.num_failures - print("Total successes = %d" % successes) - print("Total failures = %d" % failures) - if failures == 0: - print("PASS") - sys.exit(0) - else: - print("FAIL") - sys.exit(1) diff --git a/scripts/examples/python/monitor-unwanted-domains.py 
b/scripts/examples/python/monitor-unwanted-domains.py deleted file mode 100644 index 317725288e2..00000000000 --- a/scripts/examples/python/monitor-unwanted-domains.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import print_function -import os, subprocess, XenAPI, inventory, time, sys - -# Script which monitors the domains running on a host, looks for -# paused domains which don't correspond to VMs which are running here -# or are about to run here, logs them and optionally destroys them. - -# Return a list of (domid, uuid) tuples, one per paused domain on this host -def list_paused_domains(): - results = [] - all = subprocess.Popen(["@OPTDIR@/bin/list_domains"], stdout=subprocess.PIPE).communicate()[0] - lines = all.split("\n") - for domain in lines[1:]: - bits = domain.split() - if bits != []: - domid = bits[0] - uuid = bits[2] - state = bits[4] - if 'P' in state: - results.append( (domid, uuid) ) - return results - -# Given localhost's uuid and a (domid, uuid) tuple, return True if the domain -# be somewhere else i.e. we think it may have leaked here -def should_domain_be_somewhere_else(localhost_uuid, domain): - (domid, uuid) = domain - try: - x = XenAPI.xapi_local() - x.xenapi.login_with_password("root", "", "1.0", "xen-api-scripts-monitor-unwanted-domains.py") - try: - try: - vm = x.xenapi.VM.get_by_uuid(uuid) - resident_on = x.xenapi.VM.get_resident_on(vm) - current_operations = x.xenapi.VM.get_current_operations(vm) - result = current_operations == {} and resident_on != localhost_uuid - if result: - log("domid %s uuid %s: is not being operated on and is not resident here" % (domid, uuid)) - return result - except XenAPI.Failure as e: - if e.details[0] == "UUID_INVALID": - # VM is totally bogus - log("domid %s uuid %s: is not in the xapi database" % (domid, uuid)) - return True - # fail safe for now - return False - finally: - x.xenapi.logout() - except: - return False - -def log(str): - print(str) - -# Destroy the given domain -def destroy_domain(domain): - (domid, uuid) = domain - log("destroying domid %s uuid %s" % (domid, uuid)) - all = subprocess.Popen(["@OPTDIR@/debug/destroy_domain", "-domid", domid], stdout=subprocess.PIPE).communicate()[0] - -# Keep track of when a domain first looked like it should be here -domain_first_noticed = {} - -# Number of seconds after which we conclude that a domain really shouldn't be here -threshold = 60 - -if __name__ == "__main__": - localhost_uuid = inventory.get_localhost_uuid () - while True: - time.sleep(1) - paused = list_paused_domains () - # GC the domain_first_noticed map - for d in domain_first_noticed.keys(): - if d not in paused: - log("domid %s uuid %s: looks ok now, forgetting about it" % d) - del domain_first_noticed[d] - - for d in list_paused_domains(): - if should_domain_be_somewhere_else(localhost_uuid, d): - if d not in domain_first_noticed: - domain_first_noticed[d] = time.time() - noticed_for = time.time() - domain_first_noticed[d] - if noticed_for > threshold: - log("domid %s uuid %s: has been in bad state for over threshold" % d) - if "-destroy" in sys.argv: - destroy_domain(d) - - diff --git a/scripts/examples/python/provision.py b/scripts/examples/python/provision.py deleted file mode 100644 index b8aa3f3935f..00000000000 --- a/scripts/examples/python/provision.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2007 XenSource, Inc. 
-# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Parse/regenerate the "disk provisioning" XML contained within templates -# NB this provisioning XML refers to disks which should be created when -# a VM is installed from this template. It does not apply to templates -# which have been created from real VMs -- they have their own disks. - -from __future__ import print_function -import XenAPI -import xml.dom.minidom - -class Disk: - """Represents a disk which should be created for this VM""" - def __init__(self, device, size, sr, bootable): - self.device = device # 0, 1, 2, ... - self.size = size # in bytes - self.sr = sr # uuid of SR - self.bootable = bootable - def toElement(self, doc): - disk = doc.createElement("disk") - disk.setAttribute("device", self.device) - disk.setAttribute("size", self.size) - disk.setAttribute("sr", self.sr) - b = "false" - if self.bootable: b = "true" - disk.setAttribute("bootable", b) - return disk - -def parseDisk(element): - device = element.getAttribute("device") - size = element.getAttribute("size") - sr = element.getAttribute("sr") - b = element.getAttribute("bootable") == "true" - return Disk(device, size, sr, b) - -class ProvisionSpec: - """Represents a provisioning specification: currently a list of required disks""" - def __init__(self): - self.disks = [] - def toElement(self, doc): - element = doc.createElement("provision") - for disk in self.disks: - element.appendChild(disk.toElement(doc)) - return element - def setSR(self, sr): - """Set the requested SR for each disk""" - for disk in self.disks: - disk.sr = sr - -def parseProvisionSpec(txt): - """Return an instance of type ProvisionSpec given XML text""" - doc = xml.dom.minidom.parseString(txt) - all = doc.getElementsByTagName("provision") - if len(all) != 1: - raise ValueError("Expected to find exactly one element") - ps = ProvisionSpec() - disks = all[0].getElementsByTagName("disk") - for disk in disks: - ps.disks.append(parseDisk(disk)) - return ps - -def printProvisionSpec(ps): - """Return a string containing pretty-printed XML corresponding to the supplied provisioning spec""" - doc = xml.dom.minidom.Document() - doc.appendChild(ps.toElement(doc)) - return doc.toprettyxml() - -def getProvisionSpec(session, vm): - """Read the provision spec of a template/VM""" - other_config = session.xenapi.VM.get_other_config(vm) - return parseProvisionSpec(other_config['disks']) - -def setProvisionSpec(session, vm, ps): - """Set the provision spec of a template/VM""" - txt = printProvisionSpec(ps) - try: - session.xenapi.VM.remove_from_other_config(vm, "disks") - except: - pass - session.xenapi.VM.add_to_other_config(vm, "disks", txt) - -if __name__ == "__main__": - print("Unit test of provision XML spec module") - print("--------------------------------------") - ps = ProvisionSpec() - ps.disks.append(Disk("0", "1024", "0000-0000", 
True)) - ps.disks.append(Disk("1", "2048", "1111-1111", False)) - print("* Pretty-printing spec") - txt = printProvisionSpec(ps) - print(txt) - print("* Re-parsing output") - ps2 = parseProvisionSpec(txt) - print("* Pretty-printing spec") - txt2 = printProvisionSpec(ps) - print(txt2) - if txt != txt2: - raise AssertionError("Sanity-check failed: print(parse(print(x))) <> print(x)") - print("* OK: print(parse(print(x))) == print(x)") diff --git a/scripts/examples/python/renameif.py b/scripts/examples/python/renameif.py deleted file mode 100755 index 4a3d796e1da..00000000000 --- a/scripts/examples/python/renameif.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2008 XenSource, Inc. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Allow the user to change the MAC address -> interface mapping - -from __future__ import print_function -import XenAPI, inventory, sys - -def warn(txt): - print(txt, file=sys.stderr) - -def show_pifs(pifs): - print("NIC MAC Notes") - print("----------------------------------------------") - for ref in pifs.keys(): - notes = [] - if pifs[ref]['management']: - notes.append("management interface") - nic = pifs[ref]['device'][3:] - try: - metrics = session.xenapi.PIF_metrics.get_record(session.xenapi.PIF.get_metrics(ref)) - if metrics['carrier']: - notes.append("carrier detected") - else: - notes.append("no carrier detected") - except: - pass - - print("%3s %s %s" % (nic, pifs[ref]['MAC'], ", ".join(notes))) - -def select(pifs, key): - """Select a PIF by device name or MAC""" - for ref in pifs.keys(): - if pifs[ref]['device'][3:] == key: - return ref - if pifs[ref]['MAC'].upper() == key.upper(): - return ref - return None - -def save(session, host, pifs): - """Commit changes""" - # Check that device names are unique - devices = [] - for ref in pifs.keys(): - devices.append(pifs[ref]['device'][3:]) - for i in set(devices): - devices.remove(i) - if devices != []: - print("ERROR: cannot assign two interfaces the same NIC number (%s)" % (", ".join(i))) - print("Aborted.") - sys.exit(1) - vifs = [] - for ref in pifs.keys(): - net = pifs[ref]['network'] - for vif in session.xenapi.network.get_VIFs(net): - if session.xenapi.VIF.get_currently_attached(vif): - vifs.append(vif) - if len(vifs) > 0: - plural = "" - if len(vifs) > 1: - plural = "s" - print("WARNING: this operation requires unplugging %d guest network interface%s" % (len(vifs), plural)) - print("Are you sure you want to continue? 
(yes/no) > ", end=' ') - if sys.stdin.readline().strip().lower() != "yes": - print("Aborted.") - sys.exit(1) - for vif in vifs: - dev = session.xenapi.VIF.get_device(vif) - vm = session.xenapi.VIF.get_VM(vif) - uuid = session.xenapi.VM.get_uuid(vm) - print("Hot-unplugging interface %s on VM %s" % (dev, uuid)) - session.xenapi.VIF.unplug(vif) - - for ref in pifs.keys(): - mac = pifs[ref]['MAC'] - if pifs[ref]['management']: - print("Disabling management NIC (%s)" % mac) - session.xenapi.host.management_disable() - session.xenapi.PIF.forget(ref) - for ref in pifs.keys(): - mac = pifs[ref]['MAC'] - device = pifs[ref]['device'] - mode = pifs[ref]['ip_configuration_mode'] - IP = pifs[ref]['IP'] - netmask = pifs[ref]['IP'] - gateway = pifs[ref]['gateway'] - DNS = pifs[ref]['DNS'] - new_ref = session.xenapi.PIF.introduce(host, mac, device) - session.xenapi.PIF.reconfigure_ip(new_ref, mode, IP, netmask, gateway, DNS) - if pifs[ref]['management']: - print("Re-enabling management NIC (%s)" % mac) - session.xenapi.host.management_reconfigure(new_ref) - - for vif in vifs: - dev = session.xenapi.VIF.get_device(vif) - vm = session.xenapi.VIF.get_VM(vif) - uuid = session.xenapi.VM.get_uuid(vm) - print("Hot-plugging interface %s on VM %s" % (dev, uuid)) - session.xenapi.VIF.plug(vif) - -def renameif(session): - uuid = inventory.get_localhost_uuid () - host = session.xenapi.host.get_by_uuid(uuid) - pool = session.xenapi.pool.get_all()[0] - master = session.xenapi.pool.get_master(pool) - if host != master: - warn("This host is a slave; it is not possible to rename the management interface") - - pifs = session.xenapi.PIF.get_all_records() - for ref in pifs.keys(): - if pifs[ref]['host'] != host or pifs[ref]['physical'] != True: - del pifs[ref] - - while True: - print("Current mappings:") - show_pifs(pifs) - print() - print("Type 'quit' to quit; 'save' to save; or a NIC number or MAC address to edit") - print("> ", end=' ') - x = sys.stdin.readline().strip() - if x.lower() == 'quit': - sys.exit(0) - if x.lower() == 'save': - # If a slave, filter out the management PIF - if host != master: - for ref in pifs.keys(): - if pifs[ref]['management']: - del pifs[ref] - save(session, host, pifs) - sys.exit(0) - pif = select(pifs, x) - if pif != None: - # Make sure this is not a slave's management PIF - if host != master and pifs[pif]['management']: - print("ERROR: cannot modify the management interface of a slave.") - else: - print("Selected NIC with MAC '%s'. Enter new NIC number:" % pifs[pif]['MAC']) - print("> ", end=' ') - nic = sys.stdin.readline().strip() - if not(nic.isdigit()): - print("ERROR: must enter a number (e.g. 0, 1, 2, 3, ...)") - else: - pifs[pif]['device'] = "eth" + nic - else: - print("NIC '%s' not found" % (x)) - print() - - -if __name__ == "__main__": - session = XenAPI.xapi_local() - session.login_with_password("", "", "1.0", "xen-api-scripts-renameifs.py") - try: - renameif(session) - finally: - session.logout() diff --git a/scripts/examples/python/shell.py b/scripts/examples/python/shell.py deleted file mode 100644 index 6e5e4f8ff27..00000000000 --- a/scripts/examples/python/shell.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2006-2008 Citrix Systems. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import print_function -import atexit -import cmd -import pprint -import readline -import shlex -import string -import sys - -import XenAPI - -def logout(): - try: - session.xenapi.session.logout() - except: - pass -atexit.register(logout) - -class Shell(cmd.Cmd): - def __init__(self): - cmd.Cmd.__init__(self) - self.identchars = string.ascii_letters + string.digits + '_.' - self.prompt = "xe> " - - def preloop(self): - cmd.Cmd.preloop(self) - readline.set_completer_delims(' ') - - def default(self, line): - words = shlex.split(line) - if len(words) > 0: - res = session.xenapi_request(words[0], tuple(words[1:])) - if res is not None and res != '': - pprint.pprint(res) - return False - - def completedefault(self, text, line, begidx, endidx): - words = shlex.split(line[:begidx]) - clas, func = words[0].split('.') - if len(words) > 1 or \ - func.startswith('get_by_') or \ - func == 'get_all': - return [] - uuids = session.xenapi_request('%s.get_all' % clas, ()) - return [u + " " for u in uuids if u.startswith(text)] - - def emptyline(self): - pass - - def do_EOF(self, line): - print() - sys.exit(0) - -def munge_types (str): - if str == "True": - return True - elif str == "False": - return False - - try: - return int(str) - except: - return str - -if __name__ == "__main__": - if len(sys.argv) < 2: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - - if sys.argv[1] != "-" and len(sys.argv) < 4: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - - if sys.argv[1] != "-": - url = sys.argv[1] - username = sys.argv[2] - password = sys.argv[3] - session = XenAPI.Session(url) - session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-shell.py") - cmdAt = 4 - else: - session = XenAPI.xapi_local() - session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-shell.py") - cmdAt = 2 - - # We want to support directly executing the cmd line, - # where appropriate - if len(sys.argv) > cmdAt: - cmd = sys.argv[cmdAt] - params = [munge_types(x) for x in sys.argv[(cmdAt + 1):]] - try: - print(session.xenapi_request(cmd, tuple(params)), file=sys.stdout) - except XenAPI.Failure as x: - print(x, file=sys.stderr) - sys.exit(2) - except Exception as e: - print(e, file=sys.stderr) - sys.exit(3) - sys.exit(0) - else: - Shell().cmdloop('Welcome to the XenServer shell. 
(Try "VM.get_all")') diff --git a/scripts/examples/smapiv2.py b/scripts/examples/smapiv2.py deleted file mode 100644 index cc990dcadf2..00000000000 --- a/scripts/examples/smapiv2.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import print_function -import os, sys, time, socket, traceback - -log_f = open(os.dup(sys.stdout.fileno()), "w") -pid = None - -def reopenlog(log_file): - global log_f - if log_f: - log_f.close() - if log_file: - try: - log_f = open(log_file, "a") - except FilenotFoundError: - log_f = open(log_file, "w") - else: - log_f = open(os.dup(sys.stdout.fileno()), "a") - -def log(txt): - global log_f, pid - if not pid: - pid = os.getpid() - t = time.strftime("%Y%m%dT%H:%M:%SZ", time.gmtime()) - print("%s [%d] %s" % (t, pid, txt), file=log_f) - log_f.flush() - -# Functions to construct SMAPI return types ################################# - -unit = [ "Success", "Unit" ] - -# Throw this to return an SR_BACKEND_FAILURE to the caller ################## - -class BackendError(Exception): - def __init__(self, code, params): - self.code = code - self.params = params - def __str__(self): - return "BackendError(%s, %s)" % (self.code, ", ".join(self.params)) - -class Vdi_does_not_exist(Exception): - def __init__(self, vdi): - self.vdi = vdi - def __str__(self): - return "Vdi_does_not_exist(%s)" % self.vdi - -def vdi(vdi_info): -# return ['Success', ['Vdi', {'vdi': location, 'virtual_size': str(virtual_size) }]] - return ['Success', ['Vdi', vdi_info]] - -def vdis(vis): - return ['Success', ['Vdis', vis]] - -def params(params): - return ['Success', ['Params', params ]] - -def value(result): - return { "Status": "Success", "Value": result } - -def backend_error(code, params): - return [ "Failure", [ "Backend_error", code, params ] ] - -def internal_error(txt): - return [ "Failure", "Internal_error", txt ] - -def vdi_does_not_exist(): - return [ "Failure", "Vdi_does_not_exist" ] - -# Type-checking helper functions ############################################ - -vdi_info_types = { - "vdi": type(""), - "name_label": type(""), - "name_description": type(""), - "ty": type(""), - "metadata_of_pool": type(""), - "is_a_snapshot": type(True), - "snapshot_time": type(""), - "snapshot_of": type(""), - "read_only": type(True), - "cbt_enabled": type(True), - "virtual_size": type(""), - "physical_utilisation": type("") -} - -def make_vdi_info(v): - global vdi_info_types - for k in vdi_info_types: - t = vdi_info_types[k] - if t == type(""): - v[k] = str(v[k]) - elif t == type(True): - v[k] = str(v[k]).lower() == "true" - else: - raise BackendError("make_vdi_info unknown type", [ str(t) ]) - return v - -def vdi_info(v): - global vdi_info_types - for k in vdi_info_types: - if k not in v: - raise BackendError("vdi_info missing key", [ k, repr(v) ]) - t = vdi_info_types[k] - if type(v[k]) != t: - raise BackendError("vdi_info key has wrong type", [ k, str(t), str(type(v[k])) ]) - return v - -def expect_none(x): - if x != None: - raise BackendError("type error", [ "None", repr(x) ]) - -def expect_long(x): - if type(x) != type(0): - raise BackendError("type error", [ "long int", repr(x) ]) - -def expect_string(x): - if type(x) != type(""): - raise BackendError("type error", [ "string", repr(x) ]) - -# Well-known feature flags understood by xapi ############################## - -feature_sr_probe = "SR_PROBE" -feature_sr_update = "SR_UPDATE" -feature_sr_supports_local_caching = "SR_SUPPORTS_LOCAL_CACHING" -feature_vdi_create = "VDI_CREATE" -feature_vdi_destroy = "VDI_DESTROY" 
-feature_vdi_attach = "VDI_ATTACH" -feature_vdi_detach = "VDI_DETACH" -feature_vdi_resize = "VDI_RESIZE" -feature_vdi_resize_online = "VDI_RESIZE_ONLINE" -feature_vdi_clone = "VDI_CLONE" -feature_vdi_snapshot = "VDI_SNAPSHOT" -feature_vdi_activate = "VDI_ACTIVATE" -feature_vdi_deactivate = "VDI_DEACTIVATE" -feature_vdi_update = "VDI_UPDATE" -feature_vdi_introduce = "VDI_INTRODUCE" -feature_vdi_generate_config = "VDI_GENERATE_CONFIG" -feature_vdi_reset_on_boot = "VDI_RESET_ON_BOOT" - -# Unmarshals arguments and marshals results (including exceptions) ########## - -class Marshall: - def __init__(self, x): - self.x = x - - def query(self, args): - result = self.x.query() - return value(result) - - def sr_attach(self, args): - result = self.x.sr_attach(args["task"], args["sr"], args["device_config"]) - expect_none(result) - return value(unit) - def sr_detach(self, args): - result = self.x.sr_detach(args["task"], args["sr"]) - expect_none(result) - return value(unit) - def sr_destroy(self, args): - result = self.x.sr_destroy(args["task"], args["sr"]) - expect_none(result) - return value(unit) - def sr_scan(self, args): - vis = self.x.sr_scan(args["task"], args["sr"]) - result = [vdi_info(vi) for vi in vis] - return value(vdis(result)) - - def vdi_create(self, args): - vi = self.x.vdi_create(args["task"], args["sr"], vdi_info(args["vdi_info"]), args["params"]) - return value(vdi(vdi_info(vi))) - def vdi_destroy(self, args): - result = self.x.vdi_destroy(args["task"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - - def vdi_attach(self, args): - result = self.x.vdi_attach(args["task"], args["dp"], args["sr"], args["vdi"], args["read_write"]) - expect_string(result) - return value(params(result)) - def vdi_activate(self, args): - result = self.x.vdi_activate(args["task"], args["dp"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - def vdi_deactivate(self, args): - result = self.x.vdi_deactivate(args["task"], args["dp"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - def vdi_detach(self, args): - result = self.x.vdi_detach(args["task"], args["dp"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - - - def _dispatch(self, method, params): - try: - log("method = %s params = %s" % (method, repr(params))) - args = params[0] - if method == "query": - return self.query(args) - elif method == "SR.attach": - return self.sr_attach(args) - elif method == "SR.detach": - return self.sr_detach(args) - elif method == "SR.scan": - return self.sr_scan(args) - elif method == "VDI.create": - return self.vdi_create(args) - elif method == "VDI.destroy": - return self.vdi_destroy(args) - elif method == "VDI.attach": - return self.vdi_attach(args) - elif method == "VDI.activate": - return self.vdi_activate(args) - elif method == "VDI.deactivate": - return self.vdi_deactivate(args) - elif method == "VDI.detach": - return self.vdi_detach(args) - except BackendError as e: - log("caught %s" % e) - traceback.print_exc() - return value(backend_error(e.code, e.params)) - except Vdi_does_not_exist as e: - log("caught %s" %e) - return value(vdi_does_not_exist()) - except Exception as e: - log("caught %s" % e) - traceback.print_exc() - return value(internal_error(str(e))) - -# Helper function to daemonise ############################################## -def daemonize(): - def fork(): - try: - if os.fork() > 0: - # parent - sys.exit(0) - except Exception as e: - print("fork() failed: %s" % e, file=sys.stderr) - traceback.print_exc() - raise - 
fork() - os.umask(0) - os.chdir("/") - os.setsid() - fork() - devnull = open("/dev/null", "r") - os.dup2(devnull.fileno(), sys.stdin.fileno()) - devnull = open("/dev/null", "aw") - os.dup2(devnull.fileno(), sys.stdout.fileno()) - os.dup2(devnull.fileno(), sys.stderr.fileno()) - -from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler - -# Server XMLRPC from any HTTP POST path ##################################### - -class RequestHandler(SimpleXMLRPCRequestHandler): - rpc_paths = [] - -# SimpleXMLRPCServer with SO_REUSEADDR ###################################### - -class Server(SimpleXMLRPCServer): - def __init__(self, ip, port): - SimpleXMLRPCServer.__init__(self, (ip, port), requestHandler=RequestHandler) - def server_bind(self): - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - SimpleXMLRPCServer.server_bind(self) - -# This is a hack to patch slow socket.getfqdn calls that -# BaseHTTPServer (and its subclasses) make. -# See: http://bugs.python.org/issue6085 -# See: http://www.answermysearches.com/xmlrpc-server-slow-in-python-how-to-fix/2140/ -import http.server - -def _bare_address_string(self): - host, port = self.client_address[:2] - return '%s' % host - -http.server.BaseHTTPRequestHandler.address_string = \ - _bare_address_string - -# Given an implementation, serve requests forever ########################### - -def start(impl, ip, port, daemon): - if daemon: - log("daemonising") - daemonize() - log("will listen on %s:%d" % (ip, port)) - server = Server(ip, port) - log("server registered on %s:%d" % (ip, port)) - server.register_introspection_functions() # for debugging - server.register_instance(Marshall(impl)) - log("serving requests forever") - server.serve_forever() diff --git a/scripts/examples/storage-server b/scripts/examples/storage-server deleted file mode 100755 index d5d859d9f14..00000000000 --- a/scripts/examples/storage-server +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -. /etc/rc.subr - -name="storageserver" -start_cmd="${name}_start" -stop_cmd=":" - -storageserver_start() -{ - ip=$(ifconfig xn0 | grep inet | cut -f 2 -d " ") - cd /root - /usr/local/bin/python storage.py --ip-addr ${ip} --port 8080 --log /var/log/SMlog --daemon - echo "storageserver started on ${ip}." -} - -load_rc_config $name -run_rc_command "$1" diff --git a/scripts/examples/storage.py b/scripts/examples/storage.py deleted file mode 100755 index 91214a84db4..00000000000 --- a/scripts/examples/storage.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) Citrix Inc -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
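Before the raw-file example that follows, it may help to see the smallest thing the smapiv2 module removed above can serve: Marshall hands each SR.*/VDI.* XML-RPC call to a plain implementation object, so a backend is just a class with the matching methods. A hypothetical no-op backend (class and field values invented for illustration; only the calls Marshall actually makes are assumed):

```python
# Minimal sketch of a backend for the deleted smapiv2 module. Marshall calls
# query() and sr_attach(task, sr, device_config); sr_attach must return None
# (expect_none) and query() returns the dict that value() wraps for the caller.
import smapiv2

class NullBackend:
    def query(self):
        # Advertise no optional features; name/vendor/version mirror the
        # fields the RawFiles example below returns.
        return {"name": "NullBackend", "vendor": "example", "version": "0.1",
                "features": []}

    def sr_attach(self, task, sr, device_config):
        smapiv2.log("sr_attach %s" % sr)  # returning None signals success

    def sr_detach(self, task, sr):
        pass

if __name__ == "__main__":
    # Serve on all interfaces, port 8080, in the foreground (daemon=False),
    # exactly as smapiv2.start() does for the real example backend.
    smapiv2.start(NullBackend(), "0.0.0.0", 8080, False)
```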
- -# Example storage backend using SMAPIv2 using raw files and Linux losetup - -# WARNING: this API is considered to be unstable and may be changed at-will - -from __future__ import print_function -import os, sys, subprocess, json - -import smapiv2 -from smapiv2 import log, start, BackendError, Vdi_does_not_exist - -root = "/sr/" - -# [run task cmd] executes [cmd], throwing a BackendError if exits with -# a non-zero exit code. -def run(task, cmd): - code, output = subprocess.getstatusoutput(cmd) - if code != 0: - log("%s: %s exitted with code %d: %s" % (task, cmd, code, output)) - raise BackendError - log("%s: %s" % (task, cmd)) - return output - -# Use Linux "losetup" to create block devices from files -class Loop: - # [_find task path] returns the loop device associated with [path] - def _find(self, task, path): - global root - for line in run(task, "losetup -a").split("\n"): - line = line.strip() - if line != "": - bits = line.split() - loop = bits[0][0:-1] - this_path = bits[2][1:-1] - if this_path == path: - return loop - return None - # [add task path] creates a new loop device for [path] and returns it - def add(self, task, path): - run(task, "losetup -f %s" % path) - return self._find(task, path) - # [remove task path] removes the loop device associated with [path] - def remove(self, task, path): - loop = self._find(task, path) - run(task, "losetup -d %s" % loop) - -# Use FreeBSD "mdconfig" to create block devices from files -class Mdconfig: - # [_find task path] returns the unit (mdX) associated with [path] - def _find(self, task, path): - # md0 vnode 1024M /root/big.img - for line in run(task, "mdconfig -l -v").split("\n"): - if line == "": - continue - bits = line.split() - this_path = bits[3] - if this_path == path: - return bits[0] # md0 - return None - # [add task path] returns a block device associated with [path] - def add(self, task, path): - return "/dev/" + run(task, "mdconfig -a -t vnode -f %s" % path) - # [remove task path] removes the block device associated with [path] - def remove(self, task, path): - md = self._find(task, path) - if md: - run(task, "mdconfig -d -u %s" % md) - -# [path_of_vdi vdi] returns the path in the local filesystem corresponding -# to vdi location [vdi] -def path_of_vdi(vdi): - global root - return root + vdi - -disk_suffix = ".raw" -metadata_suffix = ".json" - -class RawFiles: - def __init__(self, device): - self.device = device - - def query(self): - return { "name": "RawFiles", - "vendor": "XCP", - "version": "0.1", - "features": [ smapiv2.feature_vdi_create, - smapiv2.feature_vdi_destroy, - smapiv2.feature_vdi_attach, - smapiv2.feature_vdi_detach, - smapiv2.feature_vdi_activate, - smapiv2.feature_vdi_deactivate ] } - - def sr_attach(self, task, sr, device_config): - if not(os.path.exists(root)): - raise BackendError("SR directory doesn't exist", [ root ]) - def sr_detach(self, task, sr): - pass - def sr_destroy(self, task, sr): - pass - def sr_scan(self, task, sr): - global root - log("scanning") - results = [] - for name in os.listdir(root): - if name.endswith(metadata_suffix): - path = root + "/" + name - f = open(path, "r") - try: - vdi_info = json.loads(f.read()) - results.append(smapiv2.make_vdi_info(vdi_info)) - finally: - f.close() - return results - - def vdi_create(self, task, sr, vdi_info, params): - filename = run(task, "uuidgen") - run(task, "dd if=/dev/zero of=%s%s bs=1 count=0 seek=%s" % (path_of_vdi(filename), disk_suffix, vdi_info["virtual_size"])) - vdi_info["vdi"] = filename - f = open(path_of_vdi(filename) + 
metadata_suffix, "w") - try: - f.write(json.dumps(vdi_info)) - finally: - f.close() - return vdi_info - def vdi_destroy(self, task, sr, vdi): - if not (os.path.exists(path_of_vdi(vdi) + disk_suffix)): - raise Vdi_does_not_exist(vdi) - run(task, "rm -f %s%s" % (path_of_vdi(vdi), disk_suffix)) - run(task, "rm -f %s%s" % (path_of_vdi(vdi), metadata_suffix)) - - def vdi_attach(self, task, dp, sr, vdi, read_write): - path = path_of_vdi(vdi) + disk_suffix - loop = self.device.add(task, path) - log("loop = %s" % repr(loop)) - return loop - - def vdi_activate(self, task, dp, sr, vdi): - pass - def vdi_deactivate(self, task, dp, sr, vdi): - pass - def vdi_detach(self, task, dp, sr, vdi): - path = path_of_vdi(vdi) + disk_suffix - self.device.remove(task, path) - -if __name__ == "__main__": - from optparse import OptionParser - - parser = OptionParser() - parser.add_option("-l", "--log", dest="logfile", help="log to LOG", metavar="LOG") - parser.add_option("-p", "--port", dest="port", help="listen on PORT", metavar="PORT") - parser.add_option("-i", "--ip-addr", dest="ip", help="listen on IP", metavar="IP") - parser.add_option("-d", "--daemon", action="store_true", dest="daemon", help="run as a background daemon", metavar="DAEMON") - (options, args) = parser.parse_args() - if options.logfile: - from smapiv2 import reopenlog - reopenlog(options.logfile) - if not options.ip and not options.ip: - print("Need an --ip-addr and --port. Use -h for help", file=sys.stderr) - sys.exit(1) - - ip = options.ip - port = int(options.port) - - arch = run("startup", "uname") - if arch == "Linux": - log("startup: Using loop devices") - start(RawFiles(Loop()), ip, port, options.daemon) - elif arch == "FreeBSD": - log("startup: Using mdconfig devices") - start(RawFiles(Mdconfig()), ip, port, options.daemon) - else: - log("startup: Unknown architecture: %s" % arch) diff --git a/scripts/extensions/Test.test b/scripts/extensions/Test.test deleted file mode 100755 index f49f8c22e07..00000000000 --- a/scripts/extensions/Test.test +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python3 - - -import xmlrpc.client, sys - -def success_message(result): - rpcparams = { 'Status': 'Success', 'Value': result } - return xmlrpc.client.dumps((rpcparams, ), '', True) - -def failure_message(code, params): - rpcparams = { 'Status': 'Failure', 'ErrorDescription': [ code ] + params } - return xmlrpc.client.dumps((rpcparams, ), '', True) - -if __name__ == "__main__": - txt = sys.stdin.read() - req = xmlrpc.client.loads(txt) - print (failure_message("CODE", [ "a", "b" ])) - #print (success_message("")) - - diff --git a/scripts/generate-iscsi-iqn b/scripts/generate-iscsi-iqn index 0d662b0441c..9550435716d 100755 --- a/scripts/generate-iscsi-iqn +++ b/scripts/generate-iscsi-iqn @@ -21,7 +21,8 @@ def f(x): tmp = x.rstrip().split(".") tmp.reverse() return ".".join(tmp) -if __name__ == "__main__": print f(sys.argv[1]) + +if __name__ == "__main__": print(f(sys.argv[1])) ' geniqn() { @@ -35,7 +36,7 @@ geniqn() { domain=${defaultdomain} fi - revdomain=$(python -c "${REVERSE_PY}" $domain) + revdomain=$(python3 -c "${REVERSE_PY}" $domain) uuid=$(uuidgen | cut -d- -f1) date=$(date +"%Y-%m") diff --git a/scripts/hatests b/scripts/hatests deleted file mode 100755 index 8828820ecb3..00000000000 --- a/scripts/hatests +++ /dev/null @@ -1,260 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -import XenAPI -import getopt -import sys -import os -import commands -import random -import time -import httplib -import urllib - -def check(svm, ip): 
- """ - checking that the pool is in the same condition as before - """ - global master - global masterref - global hosts - global vmrunning - flag = True - masterref2 = svm.xenapi.pool.get_all_records().values()[0]['master'] - if masterref2 != masterref : - print("From " + ip + " point of view the pool master is " + svm.xenapi.host.get_record(masterref2)["address"]) - flag = False - hosts2 = svm.xenapi.host.get_all_records() - if len(hosts) != len(hosts2) : - print("From " + ip + " point of view the number of hosts is changed.") - flag = False - for k in hosts.keys() : - if k not in hosts2 : - print("From " + ip + " point of view " + hosts[k]["address"] + " is not present any more.") - vmrecords2 = svm.xenapi.VM.get_all_records() - vmrunning2 = {} - for k, v in vmrecords2.iteritems() : - if v['power_state'] == 'Running' and int(v['domid']) == 0: - vmrunning2[k] = v - if len(vmrunning) != len(vmrunning2) : - print("From " + ip + " point of view some VMs have changed state.") - flag = False - for k, v in vmrunning.iteritems() : - if k not in vmrunning2 : - print("From " + ip + " point of view " + v['name_label'] + " is not online any more.") - if flag : - print("On %s everything is consistent." % ip) - -def help() : - print(""" - Usage: hatests - - where options can be: - -w, --wait wait time between stopping an host and restarting it - (default 120) - - where test can be: - master_hard_failure - master_soft_failure - slave_hard_failure - slave_soft_failure - master_vif_unplug - """) - -###### START ###### - -secs = 120 - -optlist, args = getopt.getopt(sys.argv[1:],"w:h", ["wait=", "help"]) -for o, a in optlist: - if o == "-w" or o == "--wait": - secs = int(a) - elif o == "-h" or o == "--help" : - help() - sys.exit(0) - -if len(args) != 1 : - help() - sys.exit(1) - -##read config file -#config = open(sys.args[1], "r") -#slave = [] -#for line in config : -# type, ip = line.lstrip().split() -# if type == "master" : -# master = ip -# else : -# slave.append(ip) - -#connection -s = XenAPI.Session('http://localhost') -s.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - -#Getting all the installed and running VMs with dom-id > 0 -slaves = [] -master = None -vmrecords = s.xenapi.VM.get_all_records() -for k, v in vmrecords.iteritems() : - if v['power_state'] == 'Running' and int(v['domid']) > 0: - ip = commands.getoutput("xenstore-ls /local/domain/" + v['domid'] + " | grep ip") - try: - ip = ip.split()[2] - ip = ip[1:-1] - slaves.append((k, ip)) - except: - print("VM in dom" + v['domid'] + " doesn't have an IP address") - -#finding out which one is the master -svm = XenAPI.Session("http://" + slaves[0][1]) -try : - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - masterref = svm.xenapi.pool.get_all_records().values()[0]['master'] - masterrecord = svm.xenapi.host.get_record(masterref) - masterip = masterrecord['address'] -except XenAPI.Failure as inst: - masterip = inst.details[1] - svm = XenAPI.Session("http://" + masterip) - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - masterref = svm.xenapi.pool.get_all_records().values()[0]['master'] -for i in slaves : - if masterip == i[1] : - master = i - slaves.remove(i) - break -print("Master ip address is " + master[1]) - -#getting ip -> hostref references -hosts = {} -hostsrecs = svm.xenapi.host.get_all_records() -for k, v in hostsrecs.iteritems() : - hosts[v['address']] = k - -#getting the VM running -vmrunning = {} -vmrecords = svm.xenapi.VM.get_all_records() -for k, v 
in vmrecords.iteritems() : - if v['power_state'] == 'Running' and int(v['domid']) == 0: - vmrunning[k] = v - -bringup = None -vifbringup = None -if sys.argv[-1] == "master_hard_failure" : - print("Shutting down the master") - s.xenapi.VM.hard_shutdown(master[0]) - bringup = master[0] -elif sys.argv[-1] == "master_soft_failure" : - print("Shutting down the master") - s.xenapi.VM.clean_shutdown(master[0]) - bringup = master[0] -elif sys.argv[-1] == "slave_hard_failure" : - r = random.randint(0, len(slaves) - 1) - print("Shutting down slave " + slaves[r][1]) - s.xenapi.VM.hard_shutdown(slaves[r][0]) - bringup = slaves[r][0] -elif sys.argv[-1] == "slave_hard_failure" : - r = random.randint(0, len(slaves) - 1) - print("Shutting down slave " + slaves[r][1]) - s.xenapi.VM.clean_shutdown(slaves[r][0]) - bringup = slaves[r][0] -elif sys.argv[-1] == "master_vif_unplug" : - print("Unplugging the first found attached VIF in the master") - allvifs = s.xenapi.VIF.get_all_records() - for k, v in allvifs.iteritems() : - if v['currently_attached'] and v['VM'] == master[0]: - vifbringup = k - s.xenapi.VIF.unplug(vifbringup) - break - - -print("Waiting " + str(secs) + " seconds") -count = 0 -while count < secs : - time.sleep(1) - sys.stdout.write(".") - sys.stdout.flush() - count = count + 1 -sys.stdout.write("\n") - -if bringup is not None : - print("Bringing the host up again") - s.xenapi.VM.start(bringup, False, True) -if vifbringup is not None : - print("Plugging the VIF back again") - s.xenapi.VIF.plug(vifbringup) - -print("Waiting " + str(secs) + " seconds") -count = 0 -while count < secs : - time.sleep(1) - sys.stdout.write(".") - sys.stdout.flush() - count = count + 1 -sys.stdout.write("\n") - -print("Collecting logs now...") -try : - fileout = open("master-" + master[1] + "-log.tar.bz2", "w") - f = urllib.urlopen("http://root:xenroot@" + master[1] + "/system-status?host_id=" + hosts[master[1]]) - buf = f.read(50) - if len(buf) == 0 : - print(master[1] + " returned an empty log.") - else : - print("Wrote master log to master-" + master[1] + "-log.tar.bz2") - while len(buf) > 0 : - fileout.write(buf) - buf = f.read(50) -except IOError: - print("Unable to connect to %s: network error." % master[1]) -try: - fileout.close() - f.close() -except: - pass - -for k, ip in slaves : - try : - fileout = open("slave-" + ip + "-log.tar.bz2", "w") - f = urllib.urlopen("http://root:xenroot@" + ip + "/system-status?host_id=" + hosts[ip]) - buf = f.read(50) - if len(buf) == 0 : - print(ip + " returned an empty log.") - else : - print("Wrote slave " + ip + " log to slave-" + ip + "-log.tar.bz2") - while len(buf) > 0 : - fileout.write(buf) - buf = f.read(50) - except IOError: - print("Unable to connect to %s: network error." % ip) - try: - fileout.close() - f.close() - except: - pass - -#checking if everything is still OK -print("Connecting to " + master[1] + "...") -svm = XenAPI.Session("http://" + master[1]) -try : - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - check(svm, master[1]) -except XenAPI.Failure as inst: - if inst.details[0] == "HOST_IS_SLAVE" : - print(master[0] + " is not master any more") -except IOError: - print("Unable to connect to %s: network error." % master[1]) - -for slave in slaves : - print("Connecting to " + slave[1] + "...") - svm = XenAPI.Session("http://" + slave[1]) - try: - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - print("Connection succeeded! Is %s still a slave?" 
% slave[1]) - check(svm, slave[1]) - except XenAPI.Failure as inst: - if inst.details[0] == "HOST_IS_SLAVE" : - print("Connection failed because %s is still a slave." % slave[1]) - else : - print("Unable to connect to %s: XenAPI failure." % slave[1]) - except IOError: - print("Unable to connect to %s: network error." % slave[1]) diff --git a/scripts/mtcerrno-to-ocaml.py b/scripts/mtcerrno-to-ocaml.py deleted file mode 100755 index 399d265f724..00000000000 --- a/scripts/mtcerrno-to-ocaml.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python - -# Convert the MTC exit codes into a disjoint union type. Each line in the file looks like: - -# errdef, MTC_EXIT_SUCCESS, 0, 0, "", - -# Usage: -# cat ../xha.hg/include/mtcerrno.def | ./scripts/mtcerrno-to-ocaml.py > ocaml/xapi/xha_errno.ml - -from __future__ import print_function -import sys - -def parse(file): - all = [] - while True: - line = file.readline() - if line == "": - return all - if line.startswith("errdef, MTC_EXIT"): - bits = line.split(",") - name = bits[1].strip() - code = bits[2].strip() - desc = bits[4].strip() - this = { "name": name, "code": code, "desc": desc } - all.append(this) - -def ctor_name(x): - ctor = x['name'] - return ctor[0].upper() + ctor[1:].lower() - -def make_datatype(all): - print("type code = ") - for x in all: - print("| %s" % ctor_name(x)) - -def to_string(all): - print("let to_string : code -> string = function") - for x in all: - print("| %s -> \"%s\"" % (ctor_name(x), x['name'])) - -def to_description_string(all): - print("let to_description_string : code -> string = function") - for x in all: - print("| %s -> %s" % (ctor_name(x), x['desc'])) - -def of_int(all): - print("let of_int : int -> code = function") - for x in all: - print("| %s -> %s" % (x['code'], ctor_name(x))) - print("| x -> failwith (Printf.sprintf \"Unrecognised MTC exit code: %d\" x)") - -if __name__ == "__main__": - all = parse(sys.stdin) - print("(* Autogenerated by %s -- do not edit *)" % (sys.argv[0])) - make_datatype(all) - to_string(all) - to_description_string(all) - of_int(all) - - - - - diff --git a/scripts/perfmon b/scripts/perfmon deleted file mode 100644 index a84c8eb5d61..00000000000 --- a/scripts/perfmon +++ /dev/null @@ -1,1263 +0,0 @@ -#!/usr/bin/env python -# -# perfmon - a daemon for monitoring performance of the host on which it is run -# and of all the local VMs, and for generating events based on configurable -# triggers -# -# Notes: -# ====== -# The XAPI instance running on localhost monitors a number of variables -# for each VM running locally (i.e not on other pool members) and -# for the host itself. Each variable is stored in 16 RRDs (Round Robin Databases). -# -# Consolidation Number of samples in RRD -# function 5s/sample 1m/sample 1hr/sample 1day/sample -# AVERAGE 120 (10m) 120 (2h) ? ? -# MIN 120 (10m) 120 (2h) ? ? -# MAX 120 (10m) 120 (2h) ? ? -# LAST 120 (10m) 120 (2h) ? ? -# -# The "Consolidation function" tells how that RRD is built up from the -# one with the next highest sample rate. E.g. In the 1m/sample "AVERAGE" RRD -# each sample is the average of 12 from the 1s/sample "AVERAGE" RRD, whereas -# in the 1m/sample "MIN" RRD each sample is the minimum of 12 from the 1s/sample -# "AVERAGE" RRD. -# -# When XAPI is queried over http it selects the column (e.g. "1hr/sample") -# based on the "start" CGI param. It will return the highest level of granularity -# available for the period requested. -# -# The "cf" CGI param specfies the row. (All rows are returned if it's missing.) 
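The perfmon daemon below builds exactly this kind of query against xapi. As a minimal sketch of the rrd_updates call the notes above describe (assuming a local root session and omitting error handling; the parameter names are the ones perfmon itself passes in RRDUpdates further down):

```python
# Fetch consolidated RRD samples from the local xapi, as described above:
# 'start' selects the granularity column, 'cf' selects the consolidation row.
import time
import urllib.request

import XenAPI

session = XenAPI.xapi_local()
session.xenapi.login_with_password("root", "", "1.0", "rrd-updates-example")
try:
    params = {
        "session_id": session._session,   # xapi verifies this with the master
        "start": int(time.time()) - 300,  # samples from the last 5 minutes
        "cf": "AVERAGE",                  # consolidation function (row)
        "interval": "60",                 # prefer the 1m/sample column
        "host": "true",                   # include host metrics as well as VMs
    }
    query = "&".join("%s=%s" % (k, v) for k, v in params.items())
    with urllib.request.urlopen("http://localhost/rrd_updates?" + query) as f:
        print(f.read()[:200])  # xport-style XML, parsed below by RRDContentHandler
finally:
    session.xenapi.logout()
```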
- -from __future__ import print_function -import sys -import os -import getopt -import traceback -import XenAPI -import urllib -from xml import sax # used to parse rrd_updates because this may be large and sax is more efficient -from xml.dom import minidom # used to parse other-config:perfmon. Efficiency is less important than reliability here -from xml.parsers.expat import ExpatError -import time -import re -import random -import syslog -import socket -import gc -import signal -import commands - -def print_debug(string): - if debug: - print("DEBUG:", string, file=sys.stderr) - syslog.syslog(syslog.LOG_USER | syslog.LOG_INFO, "PERFMON(DEBUG): %s" % string) - -def log_err(string): - print(string, file=sys.stderr) - syslog.syslog(syslog.LOG_USER | syslog.LOG_ERR, "PERFMON: %s" % string) - pass - -def log_info(string): - print(string, file=sys.stderr) - syslog.syslog(syslog.LOG_INFO | syslog.LOG_INFO, "PERFMON: %s" % string) - pass - -def debug_mem(): - objCount = {} - gc.collect() - objList = gc.get_objects() - for obj in objList: - if getattr(obj, "__class__", None): - name = obj.__class__.__name__ - else: - name = type(obj) - if name in objCount: - objCount[name] += 1 - else: - objCount[name] = 1 - - output = [] - for name in objCount: - output.append("%s :%s" % (name, objCount[name])) - log_info("\n".join(output)) - -class PerfMonException(Exception): - pass - -class XmlConfigException(PerfMonException): - pass - -class UsageException(Exception): - pass - - -# Start a session with the master of a pool. -# Note: when calling http://localhost/rrd_update we must pass the session -# ID as a param. The host then uses this to verify our validity with -# the master before responding. -# If the verification fails we should get a 401 response -class XapiSession(XenAPI.Session): - """ Object that represents a XenAPI session with the pool master - One of these is needed to refresh a VMMonitor or HOSTMonitor config, or - to refresh an RRDUpdates object - """ - def __init__(self): - XenAPI.Session.__init__(self, "http://_var_xapi_xapi", transport=XenAPI.UDSTransport()) - self.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-perfmon") - def __del__ (self): - self.xenapi.session.logout() - def id(self): - return self._session - -class ObjectReport: - def __init__(self, objtype, uuid): - self.objtype = objtype # a string like "vm", or "host" taken from an tag - self.uuid = uuid # the object's uuid - self.vars = {} # maps rrd variable name to array of floats - def get_uuid(self): - return self.uuid - def get_var_names(self): - return self.vars.keys() - def get_value(self, var_name, row): - try: - return (self.vars[var_name])[row] - except: - return 0.0 - def insert_value(self, var_name, index, value): - if var_name not in self.vars: - self.vars[var_name] = [] - self.vars[var_name].insert(index, value) - -class RRDReport: - "This is just a data structure passed that is completed by RRDContentHandler" - def __init__(self): - self.reset() - - def reset(self): - self.columns = 0 # num xapi vars in xml - self.rows = 0 # num samples in xml - self.start_time = 0 # timestamp of 1st sample in xml - self.end_time = 0 # timestamp of last sample in xml - self.step_time = 0 # seconds between each pair of samples - self.obj_reports = {} # maps uuids to ObjectReports, built from xml - -class RRDColumn: - "class used internally by RRDContentHandler" - def __init__(self, paramname, obj_report): - self.paramname = paramname - self.obj_report = obj_report - -class RRDContentHandler(sax.ContentHandler): - """ 
Handles data in this format: - <xport> - <meta> - <start>INTEGER</start> - <step>INTEGER</step> - <end>INTEGER</end> - <rows>INTEGER</rows> - <columns>INTEGER</columns> - <legend> - <entry>IGNOREME:(host|vm):UUID:PARAMNAME</entry> - ... another COLUMNS-1 entries ... - </legend> - </meta> - <data> - <row> - <t>INTEGER(END_TIME)</t> - <v>FLOAT</v> - ... another COLUMNS-1 values ... - </row> - ... another ROWS-2 rows - <row> - <t>INTEGER(START_TIME)</t> - <v>FLOAT</v> - ... another COLUMNS-1 values ... - </row> - </data> - </xport> - """ - def __init__(self, report): - "report is saved and later updated by this object. report should contain defaults already" - self.report = report - - self.in_start_tag = False - self.in_step_tag = False - self.in_end_tag = False - self.in_rows_tag = False - self.in_columns_tag = False - self.in_entry_tag = False - self.in_row_tag = False - self.column_details = [] - self.row = 0 - - def startElement(self, name, attrs): - self.raw_text = "" - if name == 'start': - self.in_start_tag = True - elif name == 'step': - self.in_step_tag = True - elif name == 'end': - self.in_end_tag = True - elif name == 'rows': - self.in_rows_tag = True - elif name == 'columns': - self.in_columns_tag = True - elif name == 'entry': - self.in_entry_tag = True - elif name == 'row': - self.in_row_tag = True - self.col = 0 - - if self.in_row_tag: - if name == 't': - self.in_t_tag = True - elif name == 'v': - self.in_v_tag = True - - def characters(self, chars): - if (self.in_start_tag or - self.in_step_tag or - self.in_end_tag or - self.in_rows_tag or - self.in_columns_tag or - self.in_entry_tag or - #self.in_row_tag # ignore text under row tag, <row>s are just for holding <t> and <v> nodes - self.in_t_tag or - self.in_v_tag): - self.raw_text += chars - - def endElement(self, name): - if name == 'start': - # This overwritten later if there are any rows - self.report.start_time = int(self.raw_text) - self.in_start_tag = False - elif name == 'step': - self.report.step_time = int(self.raw_text) - self.in_step_tag = False - elif name == 'end': - # This overwritten later if there are any rows - self.report.end_time = int(self.raw_text) - self.in_end_tag = False - elif name == 'rows': - self.report.rows = int(self.raw_text) - self.in_rows_tag = False - elif name == 'columns': - self.report.columns = int(self.raw_text) - self.in_columns_tag = False - elif name == 'entry': - (_, objtype, uuid, paramname) = self.raw_text.split(':') - # lookup the obj_report corresponding to this uuid, or create if it does not exist - if uuid not in self.report.obj_reports: - self.report.obj_reports[uuid] = ObjectReport(objtype, uuid) - obj_report = self.report.obj_reports[uuid] - - # save the details of this column - self.column_details.append(RRDColumn(paramname, obj_report)) - self.in_entry_tag = False - elif name == 'row': - self.in_row_tag = False - self.row += 1 - elif name == 't': - # Extract start and end time from row data as it's more reliable than the values in the meta data - t = int(self.raw_text) - # Last row corresponds to start time - self.report.start_time = t - if self.row == 0: - # First row corresponds to end time - self.report.end_time = t - - self.in_t_tag = False - - elif name == 'v': - v = float(self.raw_text) - - # Find object report and paramname for this col - col_details = self.column_details[self.col] - obj_report = col_details.obj_report - paramname = col_details.paramname - - # Update object_report - obj_report.insert_value(paramname, index=0, value=v) # use index=0 as this is the earliest sample so far - - # Update position in row - self.col += 1 - - self.in_t_tag = False - - - -# An object of this class should persist the lifetime of the program -class RRDUpdates: - """ Object used to get and
parse the output the http://localhost/rrd_udpates?... - """ - def __init__(self): - # params are what get passed to the CGI executable in the URL - self.params = dict() - self.params['start'] = int(time.time()) - interval # interval seconds ago - self.params['host'] = 'true' # include data for host (as well as for VMs) - self.params['sr_uuid'] = 'all' # include data for all SRs attached to this host - self.params['cf'] = 'AVERAGE' # consolidation function, each sample averages 12 from the 5 second RRD - self.params['interval'] = str(rrd_step) # distinct from the perfmon interval - self.report = RRDReport() # data structure updated by RRDContentHandler - - def __repr__(self): - return '' % str(self.params) - - def refresh(self, session, override_params = {}): - "reread the rrd_updates over CGI and parse" - params = override_params - params['session_id'] = session.id() - params.update(self.params) - paramstr = "&".join(["%s=%s" % (k,params[k]) for k in params]) - print_debug("Calling http://localhost/rrd_updates?%s" % paramstr) - - # this is better than urllib.urlopen() as it raises an Exception on http 401 'Unauthorised' error - # rather than drop into interactive mode - sock = urllib.URLopener().open("http://localhost/rrd_updates?%s" % paramstr) - xmlsource = sock.read() - sock.close() - - # Use sax rather than minidom and save Vvvast amounts of time and memory. - self.report.reset() - sax.parseString(xmlsource, RRDContentHandler(self.report)) - - # Update the time used on the next run - self.params['start'] = self.report.end_time + 1 # avoid retrieving same data twice - - print_debug("Refreshed rrd_updates, start = %d, end = %d, rows = %d" % \ - (self.report.start_time, self.report.end_time, self.report.rows)) - - def get_num_rows(self): - "Return the number of samples of each parameter" - return self.report.rows - - def get_obj_report_by_uuid(self, uuid): - "Return an ObjectReport for the object with this uuid" - try: - return self.report.obj_reports[uuid] - except: - return None - - def get_uuid_list_by_objtype(self, objtype): - "Return a list of uuids corresonding to the objects of this type for which we have ObjectReports" - return [ objrep.uuid - for objrep in self.report.obj_reports.values() - if objrep.objtype == objtype ] - - -# Consolidation functions: -supported_consolidation_functions = [ 'sum', 'average', 'max', 'get_percent_fs_usage', 'get_percent_log_fs_usage', 'get_percent_mem_usage', 'get_percent_sr_usage' ] - -def average(mylist): - return sum(mylist)/float(len(mylist)) - -def get_percent_log_fs_usage(ignored): - "Get the percent usage of the host filesystem for logs partition. Input list is ignored and should be empty" - fs_output = commands.getoutput('df /etc/passwd') - log_fs_output = commands.getoutput('df /var/log') - fs_output = ' '.join(fs_output.splitlines()[1:]) - log_fs_output = ' '.join(log_fs_output.splitlines()[1:]) - # Get the percent usage only when there is a separate logs partition - if (fs_output.split()[0] != log_fs_output.split()[0]): - percentage = log_fs_output.split()[4] - # remove % character and convert to float - return float(percentage[0:-1])/100.0 - else: - return float('NaN') - -def get_percent_fs_usage(ignored): - "Get the percent usage of the host filesystem. 
Input list is ignored and should be empty" - # this file is on the filesystem of interest in both OEM and Retail - output = commands.getoutput('df /etc/passwd') - output = ' '.join(output.splitlines()[1:]) # remove header line and rewrap on single line - percentage = output.split()[4] - # remove % character and convert to float - return float(percentage[0:-1])/100.0 - -def get_percent_mem_usage(ignored): - "Get the percent usage of Dom0 memory/swap. Input list is ignored and should be empty" - try: - memfd = open('/proc/meminfo', 'r') - memlist = memfd.readlines() - memfd.close() - memdict = [ m.split(':', 1) for m in memlist ] - memdict = dict([(k.strip(), float(re.search('\d+', v.strip()).group(0))) for (k,v) in memdict]) - # We consider the sum of res memory and swap in use as the hard demand - # of mem usage, it is bad if this number is beyond the physical mem, as - # in such case swapping is obligatory rather than voluntary, hence - # degrading the performance. We define the percentage metrics as - # (res_mem + swap_in_use) / phy_mem, which could potentially go beyond - # 100% (but is considered bad when it does) - mem_in_use = memdict['MemTotal'] - memdict['MemFree'] - memdict['Buffers'] - memdict['Cached'] - swap_in_use = memdict['SwapTotal'] - memdict['SwapFree'] - return float(mem_in_use + swap_in_use) / memdict['MemTotal'] - except Exception as e: - log_err("Error %s in get_percent_mem_usage, return 0.0 instead" % e) - return 0.0 - -def get_percent_sr_usage(mylist): - """Get the percent usage of the SR. Input list should be exactly two items: [physical_utilisation, size]""" - try: - if len(mylist) != 2: - raise Exception("Incorrect number of values to consolidate: %d (exactly 2 values)" % len(mylist)) - physical_utilisation, size = mylist[0:2] - return float(physical_utilisation) / size - except Exception as e: - log_err("Error %s in get_percent_sr_usage, return 0.0 instead" % e) - return 0.0 - -class VariableConfig: - """Object storing the configuration of a Variable - - Initialisation parameters: - xmldoc = dom object representing the nodes in the ObjectMonitor config strings. 
- See VMMonitor.__doc__ and HOSTMonitor.__doc__ - alarm_create_callback = - callback called by Variable.update() to create and send an alarm - get_default_variable_config = - a function that VariableConfig.__init__() uses to lookup default tag values - by variable name - """ - def __init__(self, xmldoc, alarm_create_callback, get_default_variable_config): - try: name = xmldoc.getElementsByTagName('name')[0].getAttribute('value') - except IndexError: raise XmlConfigException("variable missing 'name' tag") - def get_value(tag): - try: - return xmldoc.getElementsByTagName(tag)[0].getAttribute('value') - except: - return get_default_variable_config(name, tag) - rrd_regex = get_value('rrd_regex') - consolidation_fn = get_value('consolidation_fn') - alarm_trigger_level = get_value('alarm_trigger_level') - alarm_trigger_period = get_value('alarm_trigger_period') - alarm_auto_inhibit_period = get_value('alarm_auto_inhibit_period') - alarm_trigger_sense = get_value('alarm_trigger_sense') - alarm_priority = get_value('alarm_priority') - - # Save xmldoc: we need this when creating the body of the alarms - self.xmldoc = xmldoc - - self.name = name - try: - self.rrd_regex = re.compile("^%s$" % rrd_regex) - except: - raise XmlConfigException("variable %s: regex %s does not compile" % (name, rrd_regex)) - - if consolidation_fn not in supported_consolidation_functions: - raise XmlConfigException("variable %s: consolidation function %s not supported" \ - % (name, consolidation_fn)) - self.consolidation_fn = eval(consolidation_fn) - - try: - self.alarm_trigger_period = int(alarm_trigger_period) - except: - raise XmlConfigException("variable %s: alarm_trigger_period %s not an int" % \ - (name, alarm_trigger_period)) - - try: - self.alarm_auto_inhibit_period = int(alarm_auto_inhibit_period) - except: - raise XmlConfigException("variable %s: alarm_auto_inhibit_period %s not an int" % \ - (name, alarm_auto_inhibit_period)) - try: - trigger_level = float(alarm_trigger_level) - except: - raise XmlConfigException("variable %s: alarm_trigger_level %s not a float" % \ - (name, alarm_trigger_level)) - - self.alarm_priority = alarm_priority - - if alarm_trigger_sense == "high": - self.test_level = lambda : (self.value > trigger_level) - else: - self.test_level = lambda : (self.value < trigger_level) - self.alarm_create_callback = alarm_create_callback - -def variable_configs_differ(vc1, vc2): - "Say whether configuration of one variable differs from that of another" - return vc1.xmldoc.toxml() != vc2.xmldoc.toxml() - -class VariableState: - """ Object storing the state of a Variable - """ - def __init__(self): - self.value = None - self.timeof_last_alarm = time.time() - self.alarm_auto_inhibit_period - self.trigger_down_counter = self.alarm_trigger_period - -class Variable(VariableConfig, VariableState): - """ Variable() is used by ObjectMonitor to create one Variable object for each - variable specified in it's config string - """ - def __init__(self, *args): - VariableConfig.__init__(self, *args) - VariableState.__init__(self) - self.active = True - print_debug("Created Variable %s" % self.name) - - def set_active(self, active): - print_debug("set_active on %s. (old, new) = (%s, %s)" % (self.name, self.active, active)) - if active == self.active: - return # nothing to do - self.active = active - if active: - VariableState.__init__(self) # reset when reactivating - - def __generate_alarm(self, session): - """ Generate an alarm using callback provided by creator - - ... 
provided that one has not been generated in the last - self.alarm_auto_inhibit_period seconds - """ - t = time.time() - delta = t - self.timeof_last_alarm - print_debug("Time since last alarm for var %s is %d - %d = %d. Refractory period = %d." % (self.name, t, self.timeof_last_alarm, delta, self.alarm_auto_inhibit_period)) - if delta < self.alarm_auto_inhibit_period: - return # we are in the auto inhibit period - do nothing - self.timeof_last_alarm = t - message = "value: %f\nconfig:\n%s" % (self.value, self.xmldoc.toprettyxml()) - - self.alarm_create_callback(self, session, message) - - def update(self, value, session): - """Update the value of the variable using an RRDUpdates object - - Calls self.__generate_alarm() if level has been 'bad' for more than - self.alarm_trigger_period seconds - """ - self.value = value - print_debug("Variable %s set to %f" % (self.name, value)) - if self.test_level(): - # level is bad - self.trigger_down_counter -= rrd_step - if self.trigger_down_counter <= 0: - self.__generate_alarm(session) - # reset trigger counter - self.trigger_down_counter = self.alarm_trigger_period - else: - # level good - reset trigger counter - self.trigger_down_counter = self.alarm_trigger_period - - -class ObjectMonitor: - """Abstract class, used as base for VMMonitor and HOSTMonitor - - Public attributes are uuid, refresh_config() - Inherited classes must implement a public attribute process_rrd_updates() - """ - def __init__(self, uuid): - self.uuid = uuid - self.xmlconfig = None - # "variables" is the public attribute of interest - self.variables = [] - self.refresh_config() - - def refresh_config(self): - if self.__update_xmlconfig(): - # config has changed - reparse it - try: - self.__parse_xmlconfig() - except XmlConfigException as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s config error: %s" % (self.monitortype, self.uuid, errmsg)) - except ExpatError as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s XML parse error: %s" % (self.monitortype, self.uuid, errmsg)) - return True - else: - return False # config unchanged - - def __update_xmlconfig(self): - if self.uuid not in all_xmlconfigs: - xmlconfig = None - else: - xmlconfig = all_xmlconfigs[self.uuid] - changed = False - if xmlconfig != self.xmlconfig: - self.xmlconfig = xmlconfig - changed = True - return changed - - def __parse_xmlconfig(self): - if not self.xmlconfig: - # Possible if this VM/host is not configured yet - self.variables = [] - return - xmldoc = minidom.parseString(self.xmlconfig) - variable_nodes = xmldoc.getElementsByTagName('variable') - variable_names = [] - - for vn in variable_nodes: - # create a variable using the config in vn - var = Variable(vn, self.alarm_create, self.get_default_variable_config) - - # Update list of variable names - if var.name not in variable_names: - variable_names.append(var.name) - - # build list of variables already present with same name - vars_with_same_name = [ v for v in self.variables if v.name == var.name ] - count = 0 - append_var = True - for v in vars_with_same_name: - # this list should be 0 or 1 long! - if count > 0: - log_err("programmer error: found duplicate variable %s (uuid %s)" % (var.name, self.uuid)) - self.variables.remove(v) - continue - count += 1 - - # only replace variable in self.variables if its config has changed. 
- # This way we don't reset its state - if variable_configs_differ(var, v): - self.variables.remove(v) - else: - append_var = False - - if append_var: - print_debug("Appending %s to list of variables for %s UUID=%s" % (var.name, self.monitortype, self.uuid)) - self.variables.append(var) - - # Now delete any old variables that do not appear in the new variable_nodes - variables_to_remove = [ v for v in self.variables if v.name not in variable_names ] - for v in variables_to_remove: - print_debug("Deleting %s from list of variables for UUID=%s" % (v.name, self.uuid)) - self.variables.remove(v) - - - def get_active_variables(self): - return self.variables - - def process_rrd_updates(self, rrd_updates, session): - print_debug("%sMonitor processing rrd_updates for %s" % (self.monitortype, self.uuid)) - obj_report = rrd_updates.get_obj_report_by_uuid(self.uuid) - num_rows = rrd_updates.get_num_rows() - if not obj_report: - return - params_in_obj_report = obj_report.get_var_names() - - for var in self.get_active_variables(): - # find the subset of the params returned for this object that we need to consolidate into var - params_to_consolidate = filter(var.rrd_regex.match, params_in_obj_report) - for row in range(num_rows): - # Get the values to consolidate - values_to_consolidate = map(lambda param: obj_report.get_value(param, row), params_to_consolidate) - # Consolidate them - value = var.consolidation_fn(values_to_consolidate) - # Pass result on to the variable object - this may result in an alarm being generated - var.update(value, session) - - def alarm_create(self, var, session, message): - "Callback used by Variable var to actually send an alarm" - print_debug("Creating an alarm for %s %s, message: %s" % (self.monitortype, self.uuid, message)) - session.xenapi.message.create("ALARM", var.alarm_priority, self.monitortype, self.uuid, message) - -class VMMonitor(ObjectMonitor): - """Object that maintains state of one VM - - Configured by writing an xml string into an other-config key, e.g. - xe vm-param-set uuid=$vmuuid other-config:perfmon=\ - '' - - Notes: - - Multiple nodes allowed - - full list of child nodes is - * name: what to call the variable (no default) - * alarm_priority: the priority of the messages generated (default '3') - * alarm_trigger_level: level of value that triggers an alarm (no default) - * alarm_trigger_sense: 'high' if alarm_trigger_level is a max, otherwise 'low'. 
(default 'high') - * alarm_trigger_period: num seconds of 'bad' values before an alarm is sent (default '60') - * alarm_auto_inhibit_period: num seconds this alarm disabled after an alarm is sent (default '3600') - * consolidation_fn: how to combine variables from rrd_updates into one value - (default is 'average' for 'cpu_usage', 'get_percent_fs_usage' for 'fs_usage', 'get_percent_log_fs_usage' for 'log_fs_usage', 'get_percent_mem_usage' for 'mem_usage', & 'sum' for everything else) - * rrd_regex matches the names of variables from (xe vm-data-sources-list uuid=$vmuuid) used to compute value - (only has defaults for "cpu_usage", "network_usage", and "disk_usage") - """ - def __init__(self, *args): - self.monitortype = "VM" - ObjectMonitor.__init__(self, *args) - print_debug("Created VMMonitor with uuid %s" % self.uuid) - - def get_default_variable_config(self, variable_name, config_tag): - "This allows user to not specify full set of tags for each variable in xml config" - if config_tag == 'consolidation_fn': - if variable_name == "cpu_usage": return 'average' - elif variable_name == "fs_usage": return 'get_percent_fs_usage' - elif variable_name == "log_fs_usage": return 'get_percent_log_fs_usage' - elif variable_name == "mem_usage": return 'get_percent_mem_usage' - else: return 'sum' - elif config_tag == 'rrd_regex': - if variable_name == "cpu_usage": return "cpu[0-9]+" - elif variable_name == "network_usage": return "vif_[0-9]+_[rt]x" - elif variable_name == "disk_usage": return "vbd_(xvd|hd)[a-z]+_(read|write)" - elif variable_name == "fs_usage": return "_$_DUMMY__" # match nothing - elif variable_name == "log_fs_usage": return "_$_DUMMY__" # match nothing - elif variable_name == "mem_usage": return "_$_DUMMY__" # match nothing - elif variable_name == "memory_internal_free": return variable_name - else:raise XmlConfigException("variable %s: no default rrd_regex - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_period': return '60' # 1 minute - elif config_tag == 'alarm_auto_inhibit_period': return '3600' # 1 hour - elif config_tag == 'alarm_trigger_level': - if variable_name == "fs_usage": return '0.9' # trigger when 90% full - elif variable_name == "log_fs_usage": return '0.9' # trigger when 90% full - elif variable_name == "mem_usage": return '0.95' # tigger when mem demanded is close to phy_mem - else:raise XmlConfigException("variable %s: no default alarm_trigger_level - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_sense': - if variable_name == "memory_internal_free": return "low" - else: return 'high' # trigger if *above* - elif config_tag == 'alarm_priority': return '3' # Service degradation level defined in PR-1455 - else:raise XmlConfigException("variable %s: no default available for tag %s" % (variable_name, config_tag)) - -class SRMonitor(ObjectMonitor): - """Object that maintains state of one SR - - Configured by writing an xml string into an other-config key, e.g. - xe sr-param-set uuid=$vmuuid other-config:perfmon=\ - '' - - Notes: - - Multiple nodes allowed - - full list of child nodes is - * name: what to call the variable (no default) - * alarm_priority: the priority of the messages generated (default '3') - * alarm_trigger_level: level of value that triggers an alarm (no default) - * alarm_trigger_sense: 'high' if alarm_trigger_level is a max, otherwise 'low'. 
(default 'high') - * alarm_trigger_period: num seconds of 'bad' values before an alarm is sent (default '60') - * alarm_auto_inhibit_period: num seconds this alarm disabled after an alarm is sent (default '3600') - * consolidation_fn: how to combine variables from rrd_updates into one value - (default is 'get_percent_sr_usage' for 'physical_utilistation', & 'sum' for everything else) - * rrd_regex matches the names of variables from (xe sr-data-sources-list uuid=$sruuid) used to compute value - (has default for "physical_utilistaion") - """ - def __init__(self, *args): - self.monitortype = "SR" - ObjectMonitor.__init__(self, *args) - print_debug("Created SRMonitor with uuid %s" % self.uuid) - - def get_default_variable_config(self, variable_name, config_tag): - "This allows user to not specify full set of tags for each variable in xml config" - if config_tag == 'consolidation_fn': - if variable_name == 'physical_utilisation': return 'get_percent_sr_usage' - else: return 'sum' - elif config_tag == 'rrd_regex': - if variable_name == 'physical_utilisation': return 'physical_utilisation|size' - elif variable_name == "sr_io_throughput_total_per_host": return '_$_DUMMY__' # (these are to drive Host RRDs and so are handled by the HOSTMonitor) - else:raise XmlConfigException("variable %s: no default rrd_regex - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_period': return '60' # 1 minute - elif config_tag == 'alarm_auto_inhibit_period': return '3600' # 1 hour - elif config_tag == 'alarm_trigger_level': - if variable_name == "physical_utilistaion": return '0.8' # trigger when 80% full - else:raise XmlConfigException("variable %s: no default alarm_trigger_level - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_sense': return 'high' # trigger if *above* - elif config_tag == 'alarm_priority': return '3' # Service degradation level defined in PR-1455 - else:raise XmlConfigException("variable %s: no default available for tag %s" % (variable_name, config_tag)) - -class HOSTMonitor(ObjectMonitor): - """Object that maintains state of one Host - - Configured by writing an xml string into an other-config key, e.g. - xe host-param-set uuid=$hostuuid other-config:perfmon=\ - '' - - Notes: - - Multiple nodes allowed - - full list of child nodes is - * name: what to call the variable (no default) - * alarm_priority: the priority of the messages generated (default '3') - * alarm_trigger_level: level of value that triggers an alarm (no default) - * alarm_trigger_sense: 'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high') - * alarm_trigger_period: num seconds of 'bad' values before an alarm is sent (default '60') - * alarm_auto_inhibit_period: num seconds this alarm disabled after an alarm is sent (default '3600') - * consolidation_fn: how to combine variables from rrd_updates into one value - (default is 'average' for 'cpu_usage' & 'sum' for everything else) - * rrd_regex matches the names of variables from (xe host-data-source-list uuid=$hostuuid) used to compute value - (only has defaults for "cpu_usage", "network_usage", "memory_free_kib" and "sr_io_throughput_total_xxxxxxxx" - where that last one ends with the first eight characters of the SR uuid) - - Also, as a special case for SR throughput, it is possible to configure a Host by - writing xml into the other-config key of an SR connected to it, e.g. 
- xe sr-param-set uuid=$sruuid other-config:perfmon=\ - ' - - This only works for that one specific variable-name, and rrd_regex must not be specified. - Configuration done on the host directly (variable-name sr_io_throughput_total_xxxxxxxx) takes priority. - """ - def __init__(self, *args): - self.monitortype = "Host" - self.secondary_variables = set() - self.secondary_xmlconfigs = {} # map of sr uuid to xml text - ObjectMonitor.__init__(self, *args) - print_debug("Created HOSTMonitor with uuid %s" % self.uuid) - - def get_default_variable_config(self, variable_name, config_tag): - "This allows user to not specify full set of tags for each variable in xml config" - if config_tag == 'consolidation_fn': - if variable_name == "cpu_usage": return 'average' - else: return 'sum' - elif config_tag == 'rrd_regex': - if variable_name == "cpu_usage": return "cpu[0-9]+" - elif variable_name == "network_usage": return "pif_eth[0-9]+_[rt]x" - elif variable_name == "memory_free_kib": return variable_name - elif re.match("sr_io_throughput_total_[0-9a-f]{8}$", variable_name): return variable_name[3:] - else:raise XmlConfigException("variable %s: no default rrd_regex - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_period': return '60' # 1 minute - elif config_tag == 'alarm_auto_inhibit_period': return '3600' # 1 hour - elif config_tag == 'alarm_trigger_sense': - if variable_name == "memory_free_kib": return "low" - else: return 'high' # trigger if *above* level - elif config_tag == 'alarm_priority': return '3' # Service degradation level defined in PR-1455 - else:raise XmlConfigException("variable %s: no default available for tag %s" % (variable_name, config_tag)) - - def get_active_variables(self): - r = self.variables + [v for v in self.secondary_variables if v.active] - print_debug("Returning active variables: %d main, %d total" % (len(self.variables), len(r))) - return r - - def refresh_config(self): - main_changed = ObjectMonitor.refresh_config(self) - - # Now handle any extra config from SRs. - # This functionality makes this file inelegant but means that it is - # possible to set up an alarm on each host that uses an SR by setting - # appropriate configuration in the SR's other-config. - if self.uuid not in sruuids_by_hostuuid: - print_debug("%s not in sruuids_by_hostuuid" % self.uuid) - self.secondary_variables.clear() - self.secondary_xmlconfigs.clear() - return - - secondary_changed = False - old_sruuids = set(self.secondary_xmlconfigs) # create set of keys - current_sruuids = sruuids_by_hostuuid[self.uuid] # a set already - if old_sruuids != current_sruuids: - print_debug("Changed set of perfmon sruuids for host %s" % self.uuid) - secondary_changed = True - else: - for sruuid in sruuids_by_hostuuid[self.uuid]: - sr_xmlconfig = all_xmlconfigs[sruuid] - # As an optimisation, if xml unchanged then do not re-parse. - # Otherwise we would create Variables which would turn out to be same as existing ones so we would ignore them. 
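#   (Illustrative sketch, not part of the original script: the unchanged-config
#   test below amounts to a cache lookup keyed by SR uuid, e.g.
#       cached = self.secondary_xmlconfigs.get(sruuid)   # None if never seen
#       secondary_changed = cached != sr_xmlconfig
#   assuming sr_xmlconfig is always a string, so a missing entry never compares equal.)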
- if sruuid in self.secondary_xmlconfigs and self.secondary_xmlconfigs[sruuid] == sr_xmlconfig: - print_debug("Unchanged sr_xmlconfig for sruuid %s" % sruuid) - else: - print_debug("Found new/different sr_xmlconfig for sruuid %s" % sruuid) - secondary_changed = True - break - - if secondary_changed: - try: - self.__parse_secondary_xmlconfigs() - except XmlConfigException as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s secondary config error: %s" % (self.monitortype, self.uuid, errmsg)) - except ExpatError as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s secondary XML parse error: %s" % (self.monitortype, self.uuid, errmsg)) - - if main_changed or secondary_changed: - # Calculate which secondary variables are active, i.e. not overridden by ones configured on the host rather than the SR. - main_names = {v.name for v in self.variables} - for v in self.secondary_variables: - v.set_active(v.name not in main_names) - - def __parse_secondary_xmlconfigs(self): - variable_names = set() # Names of the Variable objects we create based on the xml nodes we find - self.secondary_xmlconfigs.clear() - for sruuid in sruuids_by_hostuuid[self.uuid]: - print_debug("Looking for config on SR uuid %s" % sruuid) - sr_xmlconfig = all_xmlconfigs[sruuid] - self.secondary_xmlconfigs[sruuid] = sr_xmlconfig - xmldoc = minidom.parseString(sr_xmlconfig) - variable_nodes = xmldoc.getElementsByTagName('variable') - found = False - for vn in variable_nodes: - try: - name_element = vn.getElementsByTagName('name')[0] - name = name_element.getAttribute('value') - except IndexError: - log_err("variable missing 'name' tag in perfmon xml config of SR %s" % sruuid) - continue # perhaps other nodes are valid - print_debug("Found variable with name %s on SR uuid %s" % (name, sruuid)) - if name != 'sr_io_throughput_total_per_host': - continue # Do nothing unless the variable is meant for the host - if len(vn.getElementsByTagName('rrd_regex')) > 0: - log_err("Configuration error: rrd_regex must not be specified in config on SR meant for each host") - continue # perhaps another node is valid - if found: - log_err("Configuration error: duplicate variable %s on SR %s" % (name, sruuid)) - # A host can only have one Variable from a given SR since we only accept one kind (one name). - break - found = True - name_override = 'sr_io_throughput_total_%s' % sruuid[0:8] - name_element.setAttribute('value', name_override) - provenance_element = xmldoc.createElement('configured_on') - provenance_element.setAttribute('class', 'SR') - provenance_element.setAttribute('uuid', sruuid) - vn.appendChild(provenance_element) - var = Variable(vn, self.alarm_create, self.get_default_variable_config) - variable_names.add(var.name) - append_var = True - vars_with_same_name = [ v for v in self.secondary_variables if v.name == var.name ] - for v in vars_with_same_name: - # this list should be 0 or 1 long! - # only replace variable in self.secondary_variables if its config has changed. 
- # This way we don't reset its state - if variable_configs_differ(var, v): - print_debug("Removing existing secondary variable to replace with new: %s" % v.name) - self.secondary_variables.remove(v) - else: - print_debug("Found existing secondary variable with same config: %s" % v.name) - append_var = False - if append_var: - print_debug("Adding %s to set of secondary variables for host UUID=%s" % (var.name, self.uuid)) - self.secondary_variables.add(var) - - # Now that we have read all the xml items, - # delete any old variables that do not appear in the new variable_nodes - print_debug("Going to delete any secondary_variables not in %s" % variable_names) - variables_to_remove = [ v for v in self.secondary_variables if v.name not in variable_names ] - for v in variables_to_remove: - print_debug("Deleting %s from set of secondary variables for UUID=%s" % (v.name, self.uuid)) - self.secondary_variables.remove(v) - -all_xmlconfigs = {} -sruuids_by_hostuuid = {} # Maps host uuid to a set of the uuids of the host's SRs that have other-config:perfmon -def update_all_xmlconfigs(session): - """Update all_xmlconfigs, a global dictionary that maps any uuid - (SR, host or VM) to the xml config string in other-config:perfmon keys - and update sruuids_by_hostuuid which together with all_xmlconfigs allows - lookup of the other-config:perfmon xml of the SRs connected to a host""" - global all_xmlconfigs - global sruuids_by_hostuuid - - all_host_recs = session.xenapi.host.get_all_records() - all_vm_recs = session.xenapi.VM.get_all_records() - all_sr_recs = session.xenapi.SR.get_all_records() - - # build dictionary mapping uuids to other_configs - all_otherconfigs = {} - - for recs in (all_host_recs, all_vm_recs, all_sr_recs): - all_otherconfigs.update([ - (recs[ref]['uuid'], recs[ref]['other_config']) - for ref in recs.keys() - ]) - - # rebuild dictionary mapping uuids to xmlconfigs - all_xmlconfigs.clear() - all_xmlconfigs.update([ - (uuid, other_config['perfmon']) - for (uuid, other_config) in all_otherconfigs.items() - if 'perfmon' in other_config - ]) - - # Rebuild another map - sruuids_by_hostuuid.clear() - for (sr, rec) in all_sr_recs.items(): - if 'perfmon' in rec['other_config']: - sruuid = rec['uuid'] - # If we hadn't done SR.get_all_records we would now do SR.get_PBDs. 
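#   (Illustrative sketch, not part of the original script: the if/else
#   accumulation in the loop a few lines below is equivalent to
#       sruuids_by_hostuuid.setdefault(hu, set()).add(sruuid)
#   for each host uuid hu.)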
- host_refs = [session.xenapi.PBD.get_host(pbd) for pbd in rec['PBDs']] - host_uuids = [all_host_recs[ref]['uuid'] for ref in host_refs] - for hu in host_uuids: - if hu in sruuids_by_hostuuid: - sruuids_by_hostuuid[hu].add(sruuid) - else: - sruuids_by_hostuuid[hu] = {sruuid} - -# 5 minute default interval -interval = 300 -interval_percent_dither = 5 -rrd_step = 60 -debug = False - -# rate to call update_all_xmlconfigs() -config_update_period = 1800 - -cmdsockname = "\0perfmon" # an af_unix socket name (the "\0" stops socket.bind() creating a fs node) -cmdmaxlen = 256 - -def main(): - global interval - global interval_percent_dither - global rrd_step - global debug - global config_update_period - maxruns=None - try: - argv = sys.argv[1:] - opts, args = getopt.getopt(argv, "i:n:ds:c:D:", - ["interval=", "numloops=","debug","rrdstep=","config_update_period=","interval_percent_dither="]) - except getopt.GetoptError: - raise UsageException - - configfname = None - for opt, arg in opts: - if opt == '-i' or opt == '--interval': - interval = int(arg) - elif opt == '-n' or opt == '--numloops': - maxruns = int(arg) - elif opt == '-d' or opt == '--debug': - debug = True - elif opt == '-s' or opt == '--rrdstep': - rrd_step = int(arg) - if rrd_step != 5 and rrd_step != 60: - raise UsageException - elif opt == '-c' or opt == '--config_update_period': - config_update_period = int(arg) - elif opt == '-D' or opt == '--interval_percent_dither': - interval_percent_dither = int(arg) - else: - raise UsageException - - # open the cmd socket (over which we listen for commands such as "refresh") - cmdsock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - cmdsock.bind(cmdsockname) - - - # The dither on each loop (prevents stampede on master) - rand = random.Random().uniform - dither = (interval * interval_percent_dither)/100.0 - - # Create a XAPI session on first run - restart_session = True - - # Create a client for getting the rrd_updates over HTTP - rrd_updates = RRDUpdates() - - # Work out when next to update all the xmlconfigs for all the - # hosts and all the VMs. This causes a lot of data to be retrieved - # from the master, so we only do it once every config_update_period - # and we cache the results - next_config_update = time.time() - - # monitors for vms running on this host. 
- # This dictionary uses uuids to lookup each monitor object - vm_mon_lookup = {} - - # monitors for srs plugged on this host - # This dictionary uses uuids to lookup each monitor object - sr_mon_lookup = {} - - # The monitor for the host - host_mon = None - - runs = 0 - while True: - print_debug("Run: %d" % runs) - - # Get new updates - and catch any http errors - try: - # if session has failed on last run we need to restart it - if restart_session: - session = XapiSession() - restart_session = False - - rrd_updates.refresh(session) - - # Should we update all_xmlconfigs - if time.time() >= next_config_update: - print_debug("Updating all_xmlconfigs") - # yes - update all the xml configs: this generates a few LARGE xapi messages from the master - update_all_xmlconfigs(session) - - # Set time when to do this next - next_config_update = time.time() + config_update_period - - # List of VMs present in rrd_updates - vm_uuid_list = rrd_updates.get_uuid_list_by_objtype('vm') - - # Remove any monitors for VMs no longer listed in rrd_updates page - for uuid in vm_mon_lookup.keys(): - if uuid not in vm_uuid_list: - vm_mon_lookup.pop(uuid) - - # Create monitors for VMs that have just appeared in rrd_updates page - for uuid in vm_uuid_list: - if uuid not in vm_mon_lookup.keys(): - vm_mon_lookup[uuid] = VMMonitor(uuid) - else: - # check if the config has changed, e.g. by XenCenter - vm_mon_lookup[uuid].refresh_config() - - # Remove monitor for the host if it's no longer listed in rrd_updates page - # Create monitor for the host if it has just appeared in rrd_updates page - try: - host_uuid = rrd_updates.get_uuid_list_by_objtype('host')[0] # should only ever be one of these - except: - # list may be empty! - host_uuid = None - - if not host_uuid: - host_mon = None - elif not host_mon: - host_mon = HOSTMonitor(host_uuid) - elif host_mon.uuid != host_uuid: - raise PerfMonException("host uuid in rrd_updates changed (old: %s, new %s)" % \ - (host_mon.uuid, host_uuid)) - else: - # check if the config has changed, e.g. 
by XenCenter - host_mon.refresh_config() - - # List of SRs present in rrd_updates - sr_uuid_list = rrd_updates.get_uuid_list_by_objtype('sr') - print_debug("sr_uuid_list = %s" % sr_uuid_list) - - # Remove monitors for SRs no longer listed in the rrd_updates page - for uuid in sr_mon_lookup.keys(): - if uuid not in sr_uuid_list: - sr_mon_lookup.pop(uuid) - # Create monitors for SRs that have just appeared in rrd_updates page - for uuid in sr_uuid_list: - if uuid not in sr_mon_lookup.keys(): - sr_mon_lookup[uuid] = SRMonitor(uuid) - else: - sr_mon_lookup[uuid].refresh_config() - - # Go through each vm_mon and update it using the rrd_udpates - this may generate alarms - for vm_mon in vm_mon_lookup.values(): - vm_mon.process_rrd_updates(rrd_updates, session) - - # Ditto for the host_mon - if host_mon: - host_mon.process_rrd_updates(rrd_updates, session) - - # And for the sr_mons - for sr_mon in sr_mon_lookup.values(): - sr_mon.process_rrd_updates(rrd_updates, session) - - except socket.error as e: - if e.args[0] == 111: - # "Connection refused" - this happens when we try to restart session and *that* fails - time.sleep(2) - pass - - log_err("caught socket.error: (%s) - restarting XAPI session" % " ".join([str(x) for x in e.args])) - restart_session = True - - except IOError as e: - if e.args[0] == 'http error' and e.args[1] in (401, 500): - # Error getting rrd_updates: 401=Unauthorised, 500=Internal - start new session - pass - elif e.args[0] == 'socket error': - # This happens if we send messages or read other-config:perfmon after xapi is restarted - pass - else: - # Don't know why we got this error - crash, die and look at logs later - raise - - log_err("caught IOError: (%s) - restarting XAPI session" % " ".join([str(x) for x in e.args])) - restart_session = True - - runs += 1 - if maxruns is not None and runs >= maxruns: - break - - # Force collection of cyclically referenced objects cos we don't - # trust GC to do it on its own - gc.collect() - - # Sleep for interval + dither, exiting early if we recv a cmd - timeout = rand(interval, interval + dither) - cmdsock.settimeout(timeout) - try: - cmd = cmdsock.recv(cmdmaxlen) - except socket.timeout: - pass - else: - if cmd == "refresh": - # This forces a re-read of all the configs on the next loop - next_config_update = time.time() - elif cmd == "debug_mem": - debug_mem() - else: - log_err("received unhandled command %s" % cmd) - - # continue to next run - - return 0 - -def sigterm_handler(sig, stack_frame): - log_err("Caught signal %d - exiting" % sig) - sys.exit(1) - -pidfile = "/var/run/perfmon.pid" - -if __name__ == "__main__": - - # setup signal handler to print out notice when killed - signal.signal(signal.SIGTERM, sigterm_handler) - - if '--daemon' in sys.argv[1:]: - sys.argv.remove('--daemon') - if os.fork() != 0: - sys.exit(0) - os.setsid() - sys.stdout=open("/dev/null", 'w') - sys.stdin=open("/dev/null", 'r') - sys.stderr=sys.stdout - - # Exit if perfmon already running - if os.path.exists(pidfile): - pid = open(pidfile).read() - if os.path.exists("/proc/%s" % pid): - log_err("perfmon already running - exiting") - sys.exit(3) - - try: - # Write out pidfile - fd = open(pidfile,"w") - fd.write("%d" % os.getpid()) - fd.close() - - # run the main loop - rc = main() - - except UsageException as e: - # Print the usage - log_err("usage: %s [-i -n -d -s -c -D ] \\\n" \ - "\t[--interval= --numloops= --debug \\\n" \ - "\t --rrdstep= --daemon]\n" \ - "\t --config_update_period=\n" \ - "\t --interval_percent_dither=\n" \ - " interval:\tseconds 
between reads of http://localhost/rrd_updates?...\n" \ - " loops:\tnumber of times to run before exiting\n" \ - " rrd_step:\tseconds between samples provided by rrd_updates. Valid values are 5 or 60\n" \ - " config_update_period:\tseconds between getting updates of all VM/host records from master\n" \ - " interval_percent_dither:\tmax percent dither in each loop - prevents stampede on master\n" \ - % (sys.argv[0])) - rc = 1 - - except SystemExit: - # we caught a signal which we have already logged - pass - - except Exception as e: - rc = 2 - log_err("FATAL ERROR: perfmon will exit") - log_err("Exception is of class %s" % e.__class__) - ex = sys.exc_info() - err = traceback.format_exception(*ex) - - # Python built-in Exception has args, - # but XenAPI.Failure has details instead. Sigh. - try: - errmsg = "\n".join([ str(x) for x in e.args ]) - # print the exception args nicely - log_err(errmsg) - except Exception as ignored: - try: - errmsg = "\n".join([ str(x) for x in e.details ]) - # print the exception args nicely - log_err(errmsg) - except Exception as ignored: - pass - - # now log the traceback to syslog - for exline in err: - log_err(exline) - - # remove pidfile and exit - os.unlink(pidfile) - sys.exit(rc) diff --git a/scripts/plugins/extauth_hook_ad.py b/scripts/plugins/extauth_hook_ad.py deleted file mode 120000 index 19afff4d393..00000000000 --- a/scripts/plugins/extauth_hook_ad.py +++ /dev/null @@ -1 +0,0 @@ -extauth-hook-AD.py \ No newline at end of file diff --git a/scripts/scalability-tests/event-count.py b/scripts/scalability-tests/event-count.py deleted file mode 100644 index 24f3c0b5354..00000000000 --- a/scripts/scalability-tests/event-count.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python - -# Count the number of events received from the master - -from __future__ import print_function -import XenAPI, sys, time - -iso8601 = "%Y-%m-%dT%H:%M:%SZ" - - -def main(session): - global iso8601 - - token = '' - call_timeout = 30.0 - - while True: - sys.stdout.flush() - - now = time.time() - now_string = time.strftime(iso8601, time.gmtime(now)) - - try: - output = session.xenapi.event_from(["*"], token, call_timeout) - events = output['events'] - token = output['token'] - print("%s %10d 0" % (now_string, len(events))) - time.sleep(5) - - except KeyboardInterrupt: - break - - except XenAPI.Failure as e: - print(e.details) - sys.exit(1) - - -if __name__ == "__main__": - if len(sys.argv) != 4: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - - url = sys.argv[1] - if url[:5] != "https": - raise Exception("Must use SSL for a realistic test") - - username = sys.argv[2] - password = sys.argv[3] - - new_session = XenAPI.Session(url) - try: - new_session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-eventcount.py") - except XenAPI.Failure as f: - print("Failed to acquire a session: %s" % f.details) - sys.exit(1) - - try: - main(new_session) - finally: - new_session.xenapi.logout() diff --git a/scripts/scalability-tests/ping-master.py b/scripts/scalability-tests/ping-master.py deleted file mode 100755 index 048c5d4c938..00000000000 --- a/scripts/scalability-tests/ping-master.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python - -# Send back-to-back 'Host.get_servertime' calls to simulate the GUI's heartbeat and record latency. 
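#   (Illustrative sketch, not part of the original script: the heartbeat
#   latency measurement below reduces to wall-clock timing of a single call,
#       start = time.time()
#       session.xenapi.host.get_servertime(host)  # round trip being measured
#       print("%.2f" % (time.time() - start))     # latency in seconds
#   with session and host obtained via the XenAPI login shown further down.)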
- -from __future__ import print_function -import XenAPI, sys, time - -iso8601 = "%Y%m%dT%H:%M:%SZ" - -def main(session): - global iso8601 - pool = session.xenapi.pool.get_all()[0] - host = session.xenapi.pool.get_master(pool) - while True: - start = time.time() - session.xenapi.host.get_servertime(host) - latency = time.time() - start - date = time.strftime(iso8601, time.gmtime(start)) - print("%s %.2f" % (date, latency)) - sys.stdout.flush() - time.sleep(5) - - -if __name__ == "__main__": - if len(sys.argv) != 4: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - url = sys.argv[1] - if url[:5] != "https": - raise "Must use SSL for a realistic test" - - username = sys.argv[2] - password = sys.argv[3] - - session = XenAPI.Session(url) - session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-pingmaster.py") - try: - main(session) - finally: - session.xenapi.logout() - diff --git a/scripts/scalability-tests/plot-result b/scripts/scalability-tests/plot-result deleted file mode 100755 index 830590c306b..00000000000 --- a/scripts/scalability-tests/plot-result +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./plot-result vm_per_host host1 ... hostN -# - -if [ $# -le 1 ]; then - echo "Usage: $0 vm_per_host host1 [host2 ... hostN]" - echo "${0} plot the result of ./stress-tests. Need to have all the resulting .dat files of the test in the current directory. Results are .ps files." - exit 1 -fi - -VM_PER_HOST=$1 - -shift -HOSTS=$@ -MASTER=$1 - -for OP in "start-shutdown" "suspend-resume" "reboot" "live-migrate" "non-live-migrate"; do - STR="" - for HOST in $HOSTS; do - for i in `seq 1 ${VM_PER_HOST}`; do - if [ "${STR}" == "" ] - then - STR="'debian-etch-${HOST}-${i}.${OP}.dat' title '${HOST}-${i}' with lines" - else - STR+=", 'debian-etch-${HOST}-${i}.${OP}.dat' title '${HOST}-${i}' with lines" - fi - done - done - echo "set terminal postscript color eps" > tmp.conf - echo "set output '${OP}.ps'" >> tmp.conf - echo "plot ${STR}" >> tmp.conf - gnuplot tmp.conf -done - - diff --git a/scripts/scalability-tests/pool-size-tests b/scripts/scalability-tests/pool-size-tests deleted file mode 100755 index b3ea46eb9c7..00000000000 --- a/scripts/scalability-tests/pool-size-tests +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./test-pool-size n -# -# Host1 will become the master of the pool, with host2 ... hostN as slaves. -# Then, on each host, vm_per_host VMs are created, with names debian-etch-HOST_NAME-i (for i in 1..vm_per_host) - -if [ $# -ne 1 ]; then - echo "Usage: $0 number_of_vm" - echo "Need :" - echo " * ./repeat, ./repeat-clone, ./repeat-start and ./repeat-destroy scripts to be in the same directory that ${0};" - echo " * a pool already set up with a shared NFS storage and a HVM VM called dsl;" - echo " * ${0} must be started on the master of this pool;" - echo "${0} clones , then starts them all, then shutdown them all, then destroy them all. Then it ejects one host of the pool, and do the same tests again until the master remains the last host in the pool. Each operation is recoreded into a .dat file." 
- exit 1 -fi - -N=${1} -IFS=:',' -HOSTS=`xe host-list --minimal` -MASTER=`xe pool-list params=master --minimal` - -c=`xe host-list --minimal | sed -e 's/,/\n/g' | wc -l` - - -#main loop -for HOST in $HOSTS; -do - if [ ${HOST} != ${MASTER} ]; then - ./repeat-clone ${N} dsl > clone-${c}.dat - ./repeat-start ${N} dsl > start-${c}.dat - ./repeat ${N} shutdown dsl --force > shutdown-${c}.dat - ./repeat-destroy ${N} dsl > destroy-${c}.dat - - echo "Ejecting ${HOST}." - xe pool-eject host-uuid=${HOST} --force - #xe host-forget uuid=${HOST} - ((c--)) - echo "Ejected." - fi -done diff --git a/scripts/scalability-tests/provision-vm b/scripts/scalability-tests/provision-vm deleted file mode 100755 index 03fa99663e3..00000000000 --- a/scripts/scalability-tests/provision-vm +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./provision-vm vm_per_host host1 host2 ... hostN -# -# Host1 will become the master of the pool, with host2 ... hostN as slaves. -# Then, on each host, vm_per_host VMs are created, with names debian-etch-HOST_NAME-i (for i in 1..vm_per_host) - -if [ $# -le 1 ]; then - echo "Usage: ${0} vm_per_host host1 [host2 ... hostN]" - echo "${0} provisions debiant-etch VMs on each host and installs them on a local VHD disk. Moreover, all the hosts join a common pool." - echo "if PROVISION_VM_WITH_CD is set to 1, then attach guest tools ISO CD-ROM to the initial Debian Etch VM before cloning it." - exit 1 -fi - -VM_PER_HOST=$1 - -shift -HOSTS=$@ -MASTER=$1 - -if [ "${PROVISION_VM_WITH_CD}" == "1" ]; then - DEB="debian-etch-withCD" -else - DEB="debian-etch" -fi - -install-vhd () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - SR=`${XE} sr-list name-label='Local storage' --minimal` - if [ $SR ] - then - -# forget the local storage - echo "[${HOST}] Forgeting local storage." - PBD=`${XE} sr-list uuid=$SR params=PBDs --minimal` - ${XE} pbd-unplug uuid=${PBD} - ${XE} sr-forget uuid=${SR} - echo "[${HOST}] Forgotten." - -# build a local VHD storage - echo "[${HOST}] Creating a local VHD storage." - SR=`${XE} sr-create type=ext name-label=localvhd device-config:device=/dev/sda3` - ${XE} pool-param-set uuid=$(${XE} pool-list params=uuid --minimal) default-SR=${SR} crash-dump-SR=${SR} suspend-image-SR=${SR} - echo "[${HOST}] Created." - - fi -} - -install () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - - echo "[${HOST}] Installing the Debian Etch VM." - UUID=`${XE} vm-install new-name-label=${DEB} template="Debian Etch 4.0"` - echo "[${HOST}] Installed." - - echo "[${HOST}] Setting the IP address and the memory size of the VM." - NETWORK=`${XE} network-list bridge=xenbr0 --minimal` - VIF=`${XE} vif-create vm-uuid=${UUID} network-uuid=${NETWORK} device=0` - ${XE} vm-param-set uuid=${UUID} PV-args="noninteractive" - ${XE} vm-param-set uuid=${UUID} memory-static-max="50MiB" - ${XE} vm-param-set uuid=${UUID} memory-static-min="50MiB" - ${XE} vm-param-set uuid=${UUID} memory-dynamic-max="50MiB" - ${XE} vm-param-set uuid=${UUID} memory-dynamic-min="50MiB" - echo "[${HOST}] Set." - - if [ "${PROVISION_VM_WITH_CD}" == "1" ]; then - echo "[${HOST}] Attaching a CD-ROM." - TOOLS_ISO=`${XE} vdi-list is-tools-iso=ture params=name-label --minimal` - ${XE} vm-cd-add vm=${DEB} cd-name=${TOOLS_ISO} device=3 - echo "[${HOST}] Attached." - fi - -} - -#start () { -# HOST=$1 -# XE="xe -u root -pw xenroot -s ${HOST}" -# -# echo "[${HOST}] Starting VM." 
-# ${XE} vm-start vm="${DEB}" -# UUID=`${XE} vm-list name-label=${DEB} params=uuid --minimal` -# -# echo "[${HOST}] Waiting for the IP address of the VM to appear. This can take a minute or so." -# RC=1 -# while [ ${RC} -ne 0 ] -# do -# sleep 10 -# IP=`${XE} vm-param-get uuid=${UUID} param-name=networks param-key="0/ip"` -# RC=$? -# done -# -# echo "[${HOST}] Debian Etch VM installed (IP=${IP})." -#} - -#shutdown () { -# HOST=$1 -# XE="xe -u root -pw xenroot -s ${HOST}" -# -# echo "[${HOST}] Shutting down the VM." -# ${XE} vm-shutdown vm=${DEB} -# echo "[${HOST}] Shut down." -#} - -clone () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - - echo "# vm_number cumulative_time load_average vhd_size" > clone-${DEB}-${HOST}.dat - SR=`${XE} sr-list --minimal name-label=localvhd` - START=$(date +%s) - - for i in `seq 1 ${VM_PER_HOST}`; do - echo "[${HOST}] Cloning VM ${i}/${VM_PER_HOST}." - TMP=`${XE} vm-clone vm=${DEB} new-name-label=${DEB}-${HOST}-${i}` - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`${XE} host-data-source-query data-source=loadavg host=${HOST}` - VHDSIZE=`${XE} vdi-list --minimal sr-uuid=${SR} | sed -e 's/,/\n/g' | wc -l` - echo "${i} ${DIFF} ${LOADAVG} ${VHDSIZE}" >> clone-${DEB}-${HOST}.dat - echo "[${HOST}] Done." - done -} - -uninstall () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - - echo "[{$HOST}] Uninstalling the Debian Etch initial VM." - ${XE} vm-uninstall force=true vm=${DEB} - echo "[${HOST}] Uninstalled." -} - -join-master () { - HOST=$1 - if [ ${HOST} != ${MASTER} ] - then - XE="xe -u root -pw xenroot -s ${HOST}" - echo "[${HOST}] Joining ${MASTER} pool." - ${XE} pool-join master-address=${MASTER} master-username=root master-password=xenroot; - echo "[${HOST}] Joined." - fi -} - -#main loop -echo "Provisioning ${VM_PER_HOST} VMs on hosts: ${HOSTS} (master is ${MASTER})." -for HOST in $HOSTS; -do - (install-vhd $HOST; install $HOST; clone $HOST; uninstall $HOST; join-master $HOST) & -done diff --git a/scripts/scalability-tests/repeat b/scripts/scalability-tests/repeat deleted file mode 100755 index c2990a2d171..00000000000 --- a/scripts/scalability-tests/repeat +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./repeat n operation vm_name optional_args -# - -if [ $# -le 2 ]; then - echo "usage: $0 n operation vm_name [optional arguments]" - exit 1 -fi -N=$1 -OP=$2 -VM=$3 -EXTRA=$4 - -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -echo "# vm_number cumulative_time load_average" - -perform () { - i=$1 - TMP=`xe vm-${OP} ${EXTRA} vm=${VM}${i}` - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - echo "${i} ${DIFF} ${LOADAVG}"; -} - -for i in `seq 1 ${N}`; do - perform $i -done diff --git a/scripts/scalability-tests/repeat-clone b/scripts/scalability-tests/repeat-clone deleted file mode 100755 index f293465b605..00000000000 --- a/scripts/scalability-tests/repeat-clone +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. 
-# -# ./repeat-clone n vm_name -# - -if [ $# -ne 2 ]; then - echo "usage: $0 n vm_name" - exit 1 -fi -N=$1 -VM=$2 - -SR=`xe sr-list --minimal name-label='NFS virtual disk storage'` -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -echo "# vm_number cumulative_time load_average vhd_size" - -perform () { - i=$1 - TMP=`xe vm-clone vm=${VM} new-name-label=${VM}${i}` - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - VHDSIZE=` xe vdi-list --minimal sr-uuid=${SR} | sed -e 's/,/\n/g' | wc -l` - echo "${i} ${DIFF} ${LOADAVG} ${VHDSIZE}" -} - -for i in `seq 1 ${N}`; do - perform $i -done diff --git a/scripts/scalability-tests/repeat-destroy b/scripts/scalability-tests/repeat-destroy deleted file mode 100755 index b8031e781e4..00000000000 --- a/scripts/scalability-tests/repeat-destroy +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./repeat n operation vm_name optional_args -# - -if [ $# -ne 2 ]; then - echo "usage: $0 n vm_name" - exit 1 -fi -N=$1 -VM=$2 - -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -echo "# vm_number cumulative_time load_average" -perform () { - i=$1 - VM_UUID=`xe vm-list name-label=${VM}${i} params=uuid --minimal` - if [ "${VM_UUID}" != "" ]; then - TMP=`xe vm-destroy uuid=${VM_UUID}` - fi - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - echo "${i} ${DIFF} ${LOADAVG}"; -} - -for i in `seq 1 ${N}`; do - perform $i; -done diff --git a/scripts/scalability-tests/repeat-start b/scripts/scalability-tests/repeat-start deleted file mode 100755 index a439b7ac8b9..00000000000 --- a/scripts/scalability-tests/repeat-start +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./repeat n operation vm_name optional_args -# - -if [ $# -ne 2 ]; then - echo "Usage: $0 n vm_name" - echo "Starts VMs nammed vm_name<1> .. vm_name and output the time taken and the load average." - echo "if WAIT_FOR_IP is set to 1, then wait the IP address to appear before starting the next VM. need xgetip executable to be in the current directory." - exit 1 -fi - -N=$1 -VM_NAME=$2 - -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -wait_IP () { - i=$1 - VM_UUID=`xe vm-list name-label=${VM_NAME}${i} params=uuid --minimal` - MAC=`xe vif-list vm-uuid=${VM_UUID} params=MAC --minimal` - echo "Waiting for the IP address of ${VM_NAME}${i} to appear." - IP=`./xgetip xenbr0 ${MAC} &> /dev/null` - echo "IP address of ${VM_NAME}${i} is ${IP}." -} - -echo "# vm_number cumulative_time load_average" - -perform () { - i=$1 - TMP=`xe vm-start vm=${VM_NAME}${i}` - if [ "${WAIT_FOR_IP}" == "1" ]; then - wait_IP ${i} - fi - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - echo "${i} ${DIFF} ${LOADAVG}" -} - -for i in `seq 1 ${N}`; do - perform $i -done diff --git a/scripts/scalability-tests/start-tests b/scripts/scalability-tests/start-tests deleted file mode 100755 index 06fc671f135..00000000000 --- a/scripts/scalability-tests/start-tests +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./test-pool-size n vm_name -# -# Host1 will become the master of the pool, with host2 ... hostN as slaves. 
-# Then, on each host, vm_per_host VMs are created, with names debian-etch-HOST_NAME-i (for i in 1..vm_per_host) - -if [ $# -ne 2 ]; then - echo "Usage: $0 number_of_vm initial_vm_name" - echo "Need :" - echo " * ./repeat, ./repeat-clone, ./repeat-start and ./repeat-destroy scripts to be in the same directory that ${0};" - echo " * a pool already set up with a shared NFS storage and a HVM VM called dsl;" - echo " * ${0} must be started on the master of this pool;" - echo "${0} clones , then starts them all, then shutdown them all, then destroy them all." - echo "If WAIT_FOR_IP is set to 1, the script waits for the IP address of the VM to appear before starting the next VM." - exit 1 -fi - -N=${1} -VM=${2} - -./repeat-clone ${N} ${VM} > clone-${VM}.dat -./repeat-start ${N} ${VM} > start-${VM}.dat -./repeat ${N} shutdown ${VM} --force > shutdown-${VM}.dat -./repeat-destroy ${N} ${VM} > destroy-${VM}.dat \ No newline at end of file diff --git a/scripts/scalability-tests/stress-tests b/scripts/scalability-tests/stress-tests deleted file mode 100755 index e193728c9e7..00000000000 --- a/scripts/scalability-tests/stress-tests +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./stress-tests number_of_tests vm_per_host master slave1 slave2 ... slaveN -# - -if [ $# -le 2 ]; then - echo "Usage: $0 number_of_tests vm_per_host master [slave1 ... slaveN]" - echo "You need debian-etch--<1..vm_per_host> VMs installed in each host of the pool (use ./provision-vm to set them up)." - echo "${0} is a XenRT-like script. It performs: " - echo " for each VM, do sequentialy:" - echo " start/wait IP/shutdown" - echo " suspend/resume" - echo " reboot" - echo " live migrate" - echo " non-live migrate" - exit 1 -fi - -N=$1 -VM_PER_HOST=$2 - -shift -shift -HOSTS=$@ -MASTER=$1 - -XE="xe -u root -pw xenroot -s ${MASTER}" - -wait_IP () { - VM=$1 - UUID=`${XE} vm-list name-label=${VM} params=uuid --minimal` - RC=1 - while [ ${RC} -ne 0 ] - do - sleep 2 - IP=`${XE} vm-param-get uuid=${UUID} param-name=networks param-key="0/ip" &> /dev/null` - RC=$? - done -} - -start () { - VM=$1 - - ${XE} vm-start vm=${VM} - wait_IP ${VM} -} - -perform () { - OP=$1 - VM=$2 - EXTRA=$3 - - ${XE} vm-${OP} vm=${VM} $EXTRA -} - -tests () { - HOST=$1 - VM=$2 - - echo "[${VM}] start/stop tests." - START=$(date +%s) - for i in `seq 1 ${N}`; do - start ${VM}; - perform shutdown ${VM}; - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.start-shutdown.dat - done - - echo "[${VM}] suspend/resume tests." - start ${VM} - START=$(date +%s) - for i in `seq 1 ${N}`; do - perform suspend ${VM} - perform resume ${VM} - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.suspend-resume.dat - done - - echo "[${VM}] reboot tests." - START=$(date +%s) - for i in `seq 1 ${N}`; do - perform reboot ${VM} - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.reboot.dat - done - - wait_IP ${VM} - - echo "[${VM}] live migrate tests." - START=$(date +%s) - for i in `seq 1 ${N}`; do - perform migrate ${VM} "live=true host=${HOST}" - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.live-migrate.dat - done - - echo "[${VM}] non-live migrate tests." 
- START=$(date +%s) - for i in `seq 1 ${N}`; do - perform migrate ${VM} "live=false host=${HOST}" - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.non-live-migrate.dat - done - - perform shutdown ${VM} -} - -for HOST in ${HOSTS}; do - for i in `seq 1 ${VM_PER_HOST}`; do - VM="debian-etch-${HOST}-$i" - echo "Starting tests on ${VM}." - tests ${HOST} ${VM} & - done -done diff --git a/scripts/templates/debian b/scripts/templates/debian deleted file mode 100644 index 9350a40a57d..00000000000 --- a/scripts/templates/debian +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2005-2007 XenSource, Inc - -# Code ripped out of 'xgt' script for now -from __future__ import print_function -import commands, xmlrpclib, os, sys, httplib, socket, urllib2, signal - -verbose = True - -##### begin hack. Provide xmlrpc over UNIX domain socket (cut+pasted from eliloader): -class UDSHTTPConnection(httplib.HTTPConnection): - """ Stupid hacked up HTTPConnection subclass to allow HTTP over Unix domain - sockets. """ - def connect(self): - path = self.host.replace("_", "/") - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.sock.connect(path) - -class UDSHTTP(httplib.HTTP): - _connection_class = UDSHTTPConnection - -class UDSTransport(xmlrpclib.Transport): - def make_connection(self, host): - return UDSHTTP(host) - -def xapi_local(): - return xmlrpclib.Server("http://_var_xapi_xapi/", transport=UDSTransport()) -##### end hack. - - -class CommandException(Exception): - pass - - -def run(cmd, *args): - debug("+ " + cmd % args) - (ret, out) = commands.getstatusoutput(cmd % args) - if verbose: - try: - for line in out.split("\n"): - log("| " + line) - except TypeError as e: - pass - if ret != 0: - debug ("run - command %s failed with %d" , cmd, ret) - raise CommandException(out) - return out - -def log(fmt, *args): - print(fmt % args) - -def debug(msg, *args): - if verbose: - print(msg % args) - -def create_partition(lvpath): - # 1. write a partition table: - pipe = os.popen('/sbin/fdisk %s' % lvpath, 'w') - - pipe.write('n\n') # new partition - pipe.write('p\n') # primary - pipe.write("1\n") # 1st partition - pipe.write('\n') # default start cylinder - pipe.write('\n') # size: as big as image - pipe.write('w\n') # write partition table - - # XXX we must ignore certain errors here as fdisk will - # sometimes return non-zero signalling error conditions - # we don't care about. Should fix to detect these cases - # specifically. - rc = pipe.close() - if rc == None: - rc = 0 - log("fdisk exited with rc %d (some non-zero exits can be ignored safely)." 
% rc)
-
-def map_partitions(lvpath):
-    run("/sbin/kpartx -a %s", lvpath)
-    ps = []
-    for line in run("/sbin/kpartx -l %s" % lvpath).split("\n"):
-        ps.append("/dev/mapper/" + line.split()[0])
-    return ps
-
-def unmap_partitions(lvpath):
-    run("/sbin/kpartx -d %s", lvpath)
-
-def umount(mountpoint):
-    run("umount -l %s", mountpoint)
-
-if __name__ == "__main__":
-    #os.setpgrp()
-    xvda = os.getenv("xvda")
-    xvdb = os.getenv("xvdb")
-    debug("Guest's xvda is on %s" % xvda)
-    debug("Guest's xvdb is on %s" % xvdb)
-    if xvda == None or xvdb == None:
-        raise Exception("Need to pass in device names for xvda and xvdb through the environment")
-
-    vm = os.getenv("vm")
-
-    server = xapi_local()
-    try:
-        session_id = server.session.login_with_password('', '', '1.0', 'xen-api-scripts-debian')['Value']
-        uuid = server.VM.get_uuid(session_id, vm)['Value']
-        mountpoint = "/tmp/installer/%s" % (uuid)
-    finally:
-        server.session.logout(session_id)
-
-    def sighandler(signum, frame):
-        umount(mountpoint)
-        os.killpg(0, signal.SIGKILL)
-        exit(1)
-
-    signal.signal(signal.SIGTERM, sighandler)
-
-    create_partition(xvda)
-    create_partition(xvdb)
-
-    try:
-        xvda_parts = map_partitions(xvda)
-
-        run("/sbin/mkfs.ext3 %s", xvda_parts[0])
-
-        xgt = "@SHAREDIR@/packages/xgt/%s.xgt" % os.path.basename(sys.argv[0])
-
-        run("/bin/mkdir -p %s", mountpoint)
-        try:
-            run("/bin/mount %s %s", xvda_parts[0], mountpoint)
-            run("/usr/bin/unzip -p %s root.tar.bz2 | tar -C %s -jx", xgt, mountpoint)
-        finally:
-            run("/bin/umount %s", mountpoint)
-            run("/bin/rmdir %s", mountpoint)
-        run("/usr/bin/unzip -p %s swap.img | dd of=%s oflag=direct bs=1M", xgt, xvdb)
-
-        try:
-            session_id = server.session.login_with_password('', '', '1.0', 'xen-api-scripts-debian')['Value']
-            vbds = server.VM.get_VBDs(session_id, vm)['Value']
-            for i in vbds:
-                dev = server.VBD.get_userdevice(session_id, i)['Value']
-                if dev == "0":
-                    server.VBD.set_bootable(session_id, i, True)
-        finally:
-            server.session.logout(session_id)
-    finally:
-        unmap_partitions(xvda)
diff --git a/scripts/templates/debug b/scripts/templates/debug
deleted file mode 100755
index 85656ebf2d9..00000000000
--- a/scripts/templates/debug
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-
-# Script should be passed a session_id, VM reference and set of block
-# devices via the environment
-
-set > /tmp/debug-install-script
\ No newline at end of file
diff --git a/scripts/test_static_vdis.py b/scripts/test_static_vdis.py
deleted file mode 100644
index b0ab6ad5939..00000000000
--- a/scripts/test_static_vdis.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python3
-#
-# unittest for static-vdis
-
-import unittest
-from mock import MagicMock
-import sys
-import os
-import subprocess
-import tempfile
-
-# mock modules to avoid dependencies
-sys.modules["XenAPI"] = MagicMock()
-sys.modules["inventory"] = MagicMock()
-
-def import_from_file(module_name, file_path):
-    """Import a file as a module"""
-    if sys.version_info.major == 2:
-        return None
-    else:
-        from importlib import machinery, util
-        loader = machinery.SourceFileLoader(module_name, file_path)
-        spec = util.spec_from_loader(module_name, loader)
-        assert spec
-        assert spec.loader
-        module = util.module_from_spec(spec)
-        # Add the manually imported module to sys.modules
-        sys.modules[module_name] = module
-        spec.loader.exec_module(module)
-        return module
-
-def get_module():
-    """Import the static-vdis script as a module for executing unit tests on functions"""
-    testdir = os.path.dirname(__file__)
-    return import_from_file("static_vdis",
testdir + "/static-vdis") - -static_vdis = get_module() - -@unittest.skipIf(sys.version_info < (3, 0), reason="requires python3") -class TestReadWriteFile(unittest.TestCase): - def test_write_and_read_whole_file(self): - """Test read_whole_file and write_whole_file""" - test_file = tempfile.NamedTemporaryFile(delete=True) - filename = str(test_file.name) - content = r"""def read_whole_file(filename): - with open(filename, 'r', encoding='utf-8') as f: - return ''.join(f.readlines()).strip() - -def write_whole_file(filename, contents): - with open(filename, "w", encoding='utf-8') as f: - f.write(contents)""" - static_vdis.write_whole_file(filename, content) - expected_content = static_vdis.read_whole_file(filename) - self.assertEqual(expected_content, content) - - \ No newline at end of file diff --git a/scripts/time-vm-boots.py b/scripts/time-vm-boots.py deleted file mode 100755 index 85ec19f20f8..00000000000 --- a/scripts/time-vm-boots.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2006-2007 XenSource, Inc. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - -# Simple python example to demonstrate the event system. Logs into the server, -# registers for events on the VM_guest_metrics and computes the time taken for -# the guest agent to report an IP address. 
-
-from __future__ import print_function
-import XenAPI
-import sys
-import time
-
-vgm_to_vm = {}
-
-
-def register_vm_metrics(session, vm_ref, vgm):
-    global vgm_to_vm
-
-    try:
-        # avoid putting invalid references in the cache
-        tmp = session.xenapi.VM_guest_metrics.get_other(vgm)
-        vgm_to_vm[vgm] = vm_ref
-    except:
-        pass
-
-
-def vm_of_metrics(ref):
-    global vgm_to_vm
-    if not(ref in vgm_to_vm.keys()):
-        return None
-    return vgm_to_vm[ref]
-
-interesting_vms = []
-vm_boot_times = {}
-boots_seen = 0
-
-
-def dump_table(session):
-    global vm_boot_times
-    for vm_ref in vm_boot_times.keys():
-        name = session.xenapi.VM.get_name_label(vm_ref)
-        print("%s %s" % (name, vm_boot_times[vm_ref]))
-
-
-def seen_possible_boot(session, vm_ref):
-    global vm_boot_times
-    global interesting_vms
-    global boots_seen
-    if not(vm_ref in vm_boot_times.keys()) and vm_ref in interesting_vms:
-        t = time.strftime( "%Y%m%dT%H:%M:%SZ", time.gmtime())
-        vm_boot_times[vm_ref] = t
-        boots_seen += 1
-
-        name = session.xenapi.VM.get_name_label(vm_ref)
-        print("%d %s %s" % (boots_seen, name, t), file=sys.stdout)
-        print("%d %s %s" % (boots_seen, name, t), file=sys.stderr)
-        sys.stderr.flush()
-
-
-def process_guest_metrics(session, ref, snapshot):
-    if "other" in snapshot.keys():
-        other = snapshot["other"]
-        if "feature-shutdown" in other.keys():
-            the_vm = vm_of_metrics(ref)
-            seen_possible_boot(session, the_vm)
-
-
-def poll_metrics(session):
-    while True:
-        time.sleep(10)
-        all_recs = session.xenapi.VM_guest_metrics.get_all_records()
-        for ref in all_recs.keys():
-            snapshot = all_recs[ref]
-            process_guest_metrics(session, ref, snapshot)
-
-
-def process_metrics_event(session, ref):
-    vm_ref = vm_of_metrics(ref)
-    if vm_ref is None:
-        return
-    if session.xenapi.VM.get_power_state(vm_ref) != "Running":
-        return
-    other = {}
-    try:
-        other = session.xenapi.VM_guest_metrics.get_other(ref)
-    except Exception as e:
-        print(repr(e))
-
-    if "feature-shutdown" in other.keys():
-        seen_possible_boot(session, vm_ref)
-
-
-def watch_events_on_vm(session):
-    try:
-        token = ''
-        call_timeout = 30.0
-        while True:
-            output = session.xenapi.event_from(["VM", "VM_guest_metrics"], token, call_timeout)
-            events = output['events']
-            token = output['token']
-
-            for event in events:
-                if event['operation'] == 'del':
-                    continue
-                if event['class'] == 'vm' and event['operation'] == 'mod':
-                    register_vm_metrics(session, event['ref'], event['snapshot']['guest_metrics'])
-                    continue
-                if event['class'] == 'vm_guest_metrics':
-                    process_metrics_event(session, event['ref'])
-                    continue
-
-    except XenAPI.Failure as e:
-        print(e.details)
-        sys.exit(1)
-    finally:
-        session.xenapi.session.logout()
-
-
-if __name__ == "__main__":
-    if len(sys.argv) > 4 or len(sys.argv) < 2:
-        print("""
-Watches all offline VMs for boots
-Usage:
-    %s <url> [<username>] [<password>]
-or
-    %s [http://]localhost [<username>] [<password>]
-""" % (sys.argv[0], sys.argv[0]))
-        sys.exit(1)
-
-    url = sys.argv[1]
-    username = sys.argv[2] if len(sys.argv) > 2 else ""
-    password = sys.argv[3] if len(sys.argv) > 3 else ""
-
-    if url == "http://localhost" or url == "localhost":
-        new_session = XenAPI.xapi_local()
-    else:
-        new_session = XenAPI.Session(url)
-
-    # First acquire a valid session by logging in
-    try:
-        new_session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-timevmboots.py")
-    except XenAPI.Failure as f:
-        print("Failed to acquire a session: %s" % f.details)
-        sys.exit(1)
-
-    # We start watching all Halted VMs
-    all_halted_vms = new_session.xenapi.VM.get_all_records()
-    for vm in all_halted_vms.keys():
-        
vm_rec = all_halted_vms[vm] - if vm_rec["power_state"] == "Halted" and not vm_rec["is_a_template"]: - interesting_vms.append(vm) - print("Watching %d offline VMs" % (len(interesting_vms)), file=sys.stderr) - - watch_events_on_vm(new_session) diff --git a/scripts/xc.py b/scripts/xc.py deleted file mode 100644 index 25723e2e7e0..00000000000 --- a/scripts/xc.py +++ /dev/null @@ -1,12 +0,0 @@ - -class xc : - def __init__(self): - self.d = {"XenServer" : "SDK"} - self.s = "SDK" - def readconsolering(self): - return self.s - def physinfo(self): - return self.d - def xeninfo(self): - return self.d - diff --git a/scripts/xe-backup-metadata b/scripts/xe-backup-metadata index 43c4617ec3b..19f0cf0e4a9 100755 --- a/scripts/xe-backup-metadata +++ b/scripts/xe-backup-metadata @@ -51,7 +51,7 @@ function usage { function uuid5 { # could use a modern uuidgen but it's not on XS 8 # should work with Python 2 and 3 - python -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" + python3 -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" } function test_sr { diff --git a/scripts/xe-reset-networking b/scripts/xe-reset-networking deleted file mode 100755 index a5bd437f9d3..00000000000 --- a/scripts/xe-reset-networking +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env python3 - -""" -Copyright (C) 2006-2009 Citrix Systems Inc. -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published -by the Free Software Foundation; version 2.1 only. with the special -exception on linking described in file LICENSE. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. 
-""" -from __future__ import print_function - -import sys -import os -import time -import re -from optparse import OptionParser -#import XenAPI - -pool_conf = '@ETCXENDIR@/pool.conf' -inventory_file = '@INVENTORY@' -management_conf = '/etc/firstboot.d/data/management.conf' -network_reset = '/var/tmp/network-reset' - -def read_dict_file(fname): - f = open(fname, 'r') - d = {} - for l in f.readlines(): - kv = l.split('=') - d[kv[0].strip()] = kv[1].strip().strip("'") - return d - -def read_inventory(): - return read_dict_file(inventory_file) - -def read_management_conf(): - return read_dict_file(management_conf) - -def write_inventory(inventory): - f = open(inventory_file, 'w') - for k in inventory: - f.write(k + "='" + inventory[k] + "'\n") - f.flush() - os.fsync(f.fileno()) - f.close() - -def valid_vlan(vlan): - if not re.match('^\d+$', vlan): - return False - if int(vlan)<0 or int(vlan)>4094: - return False - return True - -if __name__ == "__main__": - parser = OptionParser() - parser.add_option("-m", "--master", help="Master's address", dest="address", default=None) - parser.add_option("--device", help="Device name of new management interface", dest="device", default=None) - parser.add_option("--mode", help='IP configuration mode for new management interface: "none", "dhcp" or "static" (default is dhcp)', dest="mode", default="dhcp") - parser.add_option("--mode-v6", help='IPv6 configuration mode for new management interface: "none", "dhcp", "autoconf" or "static" (default is none)', dest="mode_v6", default="none") - parser.add_option("--novlan", help="no vlan is used for new management interface", dest="novlan", action="store_const", const=True, default=False) - parser.add_option("--vlan", help="vlanID for new management interface to be on vlan network", dest="vlan", default=None) - parser.add_option("--ip", help="IP address for new management interface", dest="ip", default='') - parser.add_option("--ipv6", help="IPv6 address (CIDR format) for new management interface", dest="ipv6", default='') - parser.add_option("--netmask", help="Netmask for new management interface", dest="netmask", default='') - parser.add_option("--gateway", help="Gateway for new management interface", dest="gateway", default='') - parser.add_option("--gateway-v6", help="IPv6 Gateway for new management interface", dest="gateway_v6", default='') - parser.add_option("--dns", help="DNS server for new management interface", dest="dns", default='') - (options, args) = parser.parse_args() - - # Determine pool role - try: - f = open(pool_conf, 'r') - try: - l = f.readline() - ls = l.split(':') - if ls[0].strip() == 'master': - master = True - address = 'localhost' - else: - master = False - if options.address == None: - address = ls[1].strip() - else: - address = options.address - finally: - f.close() - except: - pass - - # Get the management device from the firstboot data if not specified by the user - if options.device == None: - try: - conf = read_management_conf() - device = conf['LABEL'] - except: - print("Could not figure out which interface should become the management interface. 
\
-            Please specify one using the --device option.")
-        sys.exit(1)
-    else:
-        device = options.device
-
-    # Get the VLAN if provided in the firstboot data and not specified by the user
-    vlan = None
-    if options.vlan:
-        if options.novlan:
-            parser.error('"--vlan <vlan>" and "--novlan" should not be used together')
-            sys.exit(1)
-        if not valid_vlan(options.vlan):
-            print("VLAN tag you gave was invalid; it must be between 0 and 4094")
-            sys.exit(1)
-        vlan = options.vlan
-    elif not options.novlan:
-        try:
-            conf = read_management_conf()
-            vlan = conf['VLAN']
-        except KeyError:
-            pass
-
-    # Determine IP configuration for management interface
-    options.mode = options.mode.lower()
-    if options.mode not in ["none", "dhcp", "static"]:
-        parser.error('mode should be either "none", "dhcp" or "static"')
-        sys.exit(1)
-
-    options.mode_v6 = options.mode_v6.lower()
-    if options.mode_v6 not in ["none", "autoconf", "dhcp", "static"]:
-        parser.error('mode-v6 should be either "none", "autoconf", "dhcp" or "static"')
-        sys.exit(1)
-
-    if options.mode == "none" and options.mode_v6 == "none":
-        parser.error("Either mode or mode-v6 must not be 'none'")
-        sys.exit(1)
-
-    if options.mode == 'static' and (options.ip == '' or options.netmask == ''):
-        parser.error("if static IP mode is selected, an IP address and netmask need to be specified")
-        sys.exit(1)
-
-    if options.mode_v6 == 'static':
-        if options.ipv6 == '':
-            parser.error("if static IPv6 mode is selected, an IPv6 address needs to be specified")
-        elif options.ipv6.find('/') == -1:
-            parser.error("Invalid format: IPv6 must be specified with CIDR format: <ipv6>/<prefix>")
-        sys.exit(1)
-
-    # Warn user
-    if not os.access('/tmp/fist_network_reset_no_warning', os.F_OK):
-        configuration = []
-        configuration.append("Management interface: " + device)
-        configuration.append("IP configuration mode: " + options.mode)
-        configuration.append("IPv6 configuration mode: " + options.mode_v6)
-        if vlan != None:
-            configuration.append("Vlan: " + vlan)
-        if options.mode == "static":
-            configuration.append("IP address: " + options.ip)
-            configuration.append("Netmask: " + options.netmask)
-        if options.mode_v6 == "static":
-            configuration.append("IPv6/CIDR: " + options.ipv6)
-        if options.gateway != '':
-            configuration.append("Gateway: " + options.gateway)
-        if options.gateway_v6 != '':
-            configuration.append("IPv6 gateway: " + options.gateway_v6)
-        if options.dns != '':
-            configuration.append("DNS server(s): " + options.dns)
-        if master == False:
-            configuration.append("Pool master's address: " + address)
-        warning = """----------------------------------------------------------------------
-!! WARNING !!
-
-This command will reboot the host and reset its network configuration.
-Any running VMs will be forcefully shut down.
-
-Before completing this command:
-- Where possible, cleanly shut down all VMs running on this host.
-- Disable HA if this host is part of a resource pool with HA enabled.
-----------------------------------------------------------------------
-
-Your network will be re-configured as follows:\n\n"""
-        confirmation = """\n\nIf you want to change any of the above settings, type 'no' and re-run
-the command with appropriate arguments (use --help for a list of options).
-
-Type 'yes' to continue.
-Type 'no' to cancel.
-"""
-        res = input(warning + '\n'.join(configuration) + confirmation)
-        if res != 'yes':
-            sys.exit(1)
-
-    # Update master's IP, if needed and given
-    if master == False and options.address != None:
-        print("Setting master's ip (" + address + ")...")
-        try:
-            f = open(pool_conf, 'w')
-            f.write('slave:' + address)
-        finally:
-            f.flush()
-            os.fsync(f.fileno())
-            f.close()
-
-    # Construct bridge name for management interface based on convention
-    if device[:3] == 'eth':
-        bridge = 'xenbr' + device[3:]
-    else:
-        bridge = 'br' + device
-
-    # Ensure xapi is not running
-    print("Stopping xapi...")
-    os.system('service xapi stop >/dev/null 2>/dev/null')
-
-    # Reconfigure new management interface
-    print("Reconfiguring " + device + "...")
-    os.system('systemctl stop xcp-networkd >/dev/null 2>/dev/null')
-    try:
-        os.remove('/var/lib/xcp/networkd.db')
-    except Exception as e:
-        print('Warning: Failed to delete networkd.db.\n%s' % e)
-
-    # Update interfaces in inventory file
-    print('Updating inventory file...')
-    inventory = read_inventory()
-    if vlan != None:
-        inventory['MANAGEMENT_INTERFACE'] = 'xentemp'
-    else:
-        inventory['MANAGEMENT_INTERFACE'] = bridge
-    inventory['CURRENT_INTERFACES'] = ''
-    write_inventory(inventory)
-
-    # Rewrite firstboot management.conf file, which will be picked up by xcp-networkd on restart (if used)
-    is_static = False
-    try:
-        f = open(management_conf, 'w')
-        f.write("LABEL='" + device + "'\n")
-        if options.mode != "none":
-            f.write("MODE='" + options.mode + "'\n")
-        if options.mode_v6 != "none":
-            f.write("MODEV6='" + options.mode_v6 + "'\n")
-        if vlan != None:
-            f.write("VLAN='" + vlan + "'\n")
-        if options.mode == 'static':
-            is_static = True
-            f.write("IP='" + options.ip + "'\n")
-            f.write("NETMASK='" + options.netmask + "'\n")
-            if options.gateway != '':
-                f.write("GATEWAY='" + options.gateway + "'\n")
-        if options.mode_v6 == "static":
-            is_static = True
-            f.write("IPv6='" + options.ipv6 + "'\n")
-            if options.gateway_v6 != '':
-                f.write("IPv6_GATEWAY='" + options.gateway_v6 + "'\n")
-        if is_static and options.dns != '':
-            f.write("DNS='" + options.dns + "'\n")
-    finally:
-        f.flush()
-        os.fsync(f.fileno())
-        f.close()
-
-    # Write trigger file for XAPI to continue the network reset on startup
-    try:
-        f = open(network_reset, 'w')
-        f.write('DEVICE=' + device + '\n')
-        if options.mode != "none":
-            f.write('MODE=' + options.mode + '\n')
-        if options.mode_v6 != "none":
-            f.write('MODE_V6=' + options.mode_v6 + '\n')
-        if vlan != None:
-            f.write('VLAN=' + vlan + '\n')
-        if options.mode == 'static':
-            f.write('IP=' + options.ip + '\n')
-            f.write('NETMASK=' + options.netmask + '\n')
-            if options.gateway != '':
-                f.write('GATEWAY=' + options.gateway + '\n')
-        if options.mode_v6 == "static":
-            f.write('IPV6=' + options.ipv6 + '\n')
-            if options.gateway_v6 != '':
-                f.write('GATEWAY_V6=' + options.gateway_v6 + '\n')
-        if is_static and options.dns != '':
-            f.write('DNS=' + options.dns + '\n')
-    finally:
-        f.flush()
-        os.fsync(f.fileno())
-        f.close()
-
-    # Reset the domain 0 network interface naming configuration
-    # back to a fresh-install state for the currently-installed
-    # hardware.
- os.system("/etc/sysconfig/network-scripts/interface-rename.py --reset-to-install") - - # Reboot - os.system("mount -o remount,rw / && reboot -f") - diff --git a/scripts/xe-restore-metadata b/scripts/xe-restore-metadata index 5968dc102e8..ca7029d7c07 100755 --- a/scripts/xe-restore-metadata +++ b/scripts/xe-restore-metadata @@ -65,7 +65,7 @@ function test_sr { NS="e93e0639-2bdb-4a59-8b46-352b3f408c19" function uuid5 { # could use a modern uuidgen but it's not on XS 8 - python -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" + python3 -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" } dry_run=0
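Note on the uuid5 hunks in scripts/xe-backup-metadata and scripts/xe-restore-metadata above: only the interpreter changes from python to python3; the generated UUIDs do not, because uuid.uuid5 is a deterministic SHA-1-based name UUID that yields the same result on Python 2 and 3 for ASCII names. A minimal sketch (not part of the patch) of what the shell helper evaluates; the namespace UUID is the NS constant from xe-restore-metadata, and the name "backup" is an illustrative placeholder:

    import uuid

    # Namespace constant as defined in scripts/xe-restore-metadata
    NS = uuid.UUID("e93e0639-2bdb-4a59-8b46-352b3f408c19")

    # uuid5(namespace, name) hashes the name into the namespace with SHA-1,
    # so the same inputs always produce the same UUID regardless of the
    # interpreter that runs it.
    print(uuid.uuid5(NS, "backup"))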