From d5a73b2056114e51fa523e9249b550840752c7ea Mon Sep 17 00:00:00 2001 From: Fei Su Date: Wed, 24 Jan 2024 10:20:47 +0800 Subject: [PATCH 001/341] format with black and isort Signed-off-by: Fei Su --- scripts/mail-alarm | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/scripts/mail-alarm b/scripts/mail-alarm index 99be5c44de8..8db6fffb811 100755 --- a/scripts/mail-alarm +++ b/scripts/mail-alarm @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # mail-alarm: uses ssmtp to send a mail message, to pool:other_config:mail-destination # @@ -11,18 +11,18 @@ # the only thing that needs be set is pool:other-config:ssmtp-mailhub from __future__ import print_function -import XenAPI -import sys + +import json import os +import re +import sys +import syslog import tempfile import traceback -import syslog -import json -import re -from xml.dom import minidom -from xml.sax.saxutils import unescape -from xml.parsers.expat import ExpatError from socket import getfqdn +from xml.dom import minidom + +import XenAPI from xcp import branding # Go read man ssmtp.conf @@ -241,7 +241,9 @@ class CpuUsageAlarmETG(EmailTextGenerator): period="%d" % self.alarm_trigger_period, level="%.1f" % (self.alarm_trigger_level * 100.0), brand_console=branding.BRAND_CONSOLE, - cls_name=(self.cls == "Host" or self.params["is_control_domain"]) and "Server" or "VM", + cls_name=(self.cls == "Host" or self.params["is_control_domain"]) + and "Server" + or "VM", ) @@ -365,7 +367,9 @@ class MemoryUsageAlarmETG(EmailTextGenerator): period="%d" % self.alarm_trigger_period, level="%d" % self.alarm_trigger_level, brand_console=branding.BRAND_CONSOLE, - cls_name=(self.cls == "Host" or self.params["is_control_domain"]) and "Server" or "VM", + cls_name=(self.cls == "Host" or self.params["is_control_domain"]) + and "Server" + or "VM", ) @@ -797,7 +801,6 @@ class XapiMessage: return self.cached_etg if self.name == "ALARM": - ( value, name, @@ -827,8 
+830,10 @@ class XapiMessage: self.mail_language, self.session, ) - elif name in ["memory_free_kib", # for Host - "memory_internal_free"]: # for VM + elif name in [ + "memory_free_kib", # for Host + "memory_internal_free", # for VM + ]: etg = MemoryUsageAlarmETG( self.cls, self.obj_uuid, @@ -980,7 +985,7 @@ def main(): 'Expected at least 1 argument but got none: ["%s"].' % (" ".join(sys.argv)) ) raise Exception("Insufficient arguments") - + session = XenAPI.xapi_local() ma_username = "__dom0__mail_alarm" session.xenapi.login_with_password( @@ -988,8 +993,6 @@ def main(): ) try: - - other_config = get_pool_other_config(session) if "mail-min-priority" in other_config: min_priority = int(other_config["mail-min-priority"]) From dd6d4f499158f9096272bac4491f410707bbb98a Mon Sep 17 00:00:00 2001 From: Fei Su Date: Wed, 24 Jan 2024 14:32:41 +0800 Subject: [PATCH 002/341] Address the error raised by pytype Signed-off-by: Fei Su --- scripts/mail-alarm | 59 ++++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/scripts/mail-alarm b/scripts/mail-alarm index 8db6fffb811..9cc9cbc4dc5 100755 --- a/scripts/mail-alarm +++ b/scripts/mail-alarm @@ -15,6 +15,7 @@ from __future__ import print_function import json import os import re +import subprocess import sys import syslog import tempfile @@ -107,12 +108,14 @@ def get_mail_language(other_config): def get_config_file(): try: - return open("/etc/mail-alarm.conf").read() + with open("/etc/mail-alarm.conf", "r") as file: + return file.read() except: return default_config def load_mail_language(mail_language): + mail_language_file = "" try: mail_language_file = os.path.join( mail_language_pack_path, mail_language + ".json" @@ -727,7 +730,10 @@ class XapiMessage: xmldoc = minidom.parseString(xml) def get_text(tag): - return xmldoc.getElementsByTagName(tag)[0].firstChild.toxml() + text = xmldoc.getElementsByTagName(tag)[0].firstChild + if text is None: + raise ValueError("Get text 
failed with tag <{}>".format(tag)) + return text.toxml() self.name = get_text("name") self.priority = get_text("priority") @@ -880,7 +886,7 @@ class XapiMessage: self.mail_language, self.session, ) - elif re.match("sr_io_throughput_total_[0-9a-f]{8}$", name): + elif name and re.match("sr_io_throughput_total_[0-9a-f]{8}$", name): etg = SRIOThroughputTotalAlertETG( self.cls, self.obj_uuid, @@ -1025,29 +1031,36 @@ def main(): config = config.replace(s, r) # Write out a temporary file containing the new config - fd, fname = tempfile.mkstemp(prefix="mail-", dir="/tmp") - try: - os.write(fd, config) - os.close(fd) + with tempfile.NamedTemporaryFile( + prefix="mail-", dir="/tmp", delete=False + ) as temp_file: + temp_file.write(config.encode()) + temp_file_path = temp_file.name + try: # Run ssmtp to send mail - chld_stdin, chld_stdout = os.popen2( - ["/usr/sbin/ssmtp", "-C%s" % fname, destination] - ) - chld_stdin.write("From: %s\n" % sender) - chld_stdin.write('Content-Type: text/plain; charset="%s"\n' % charset) - chld_stdin.write("To: %s\n" % destination.encode(charset)) - chld_stdin.write( - "Subject: %s\n" % msg.generate_email_subject().encode(charset) - ) - chld_stdin.write("\n") - chld_stdin.write(msg.generate_email_body().encode(charset)) - chld_stdin.close() - chld_stdout.close() - os.wait() - + with subprocess.Popen( + ["/usr/sbin/ssmtp", "-C%s" % temp_file_path, destination], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + ) as proc: + input_data = ( + "From: %s\n" + 'Content-Type: text/plain; charset="%s"\n' + "To: %s\n" + "Subject: %s\n" + "\n" + "%s" + ) % ( + sender, + charset, + destination.encode(charset), + msg.generate_email_subject().encode(charset), + msg.generate_email_body().encode(charset), + ) + proc.communicate(input=input_data.encode(charset)) finally: - os.unlink(fname) + os.remove(temp_file_path) finally: session.xenapi.session.logout() From 82da5a58f19c83837e4b15ce1cb168b621b64894 Mon Sep 17 00:00:00 2001 From: Fei Su Date: Thu, 25 Jan 
2024 17:01:24 +0800 Subject: [PATCH 003/341] Fix str.encode issue in python3 Signed-off-by: Fei Su --- scripts/mail-alarm | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/mail-alarm b/scripts/mail-alarm index 9cc9cbc4dc5..cc8a3419166 100755 --- a/scripts/mail-alarm +++ b/scripts/mail-alarm @@ -1024,20 +1024,20 @@ def main(): return 1 if not sender: - sender = "noreply@%s" % getfqdn().encode(charset) + sender = "noreply@%s" % getfqdn() # Replace macros in config file using search_replace list for s, r in search_replace: config = config.replace(s, r) # Write out a temporary file containing the new config - with tempfile.NamedTemporaryFile( - prefix="mail-", dir="/tmp", delete=False - ) as temp_file: - temp_file.write(config.encode()) - temp_file_path = temp_file.name - try: + with tempfile.NamedTemporaryFile( + prefix="mail-", dir="/tmp", delete=False + ) as temp_file: + temp_file.write(config.encode()) + temp_file_path = temp_file.name + # Run ssmtp to send mail with subprocess.Popen( ["/usr/sbin/ssmtp", "-C%s" % temp_file_path, destination], @@ -1054,9 +1054,9 @@ def main(): ) % ( sender, charset, - destination.encode(charset), - msg.generate_email_subject().encode(charset), - msg.generate_email_body().encode(charset), + destination, + msg.generate_email_subject(), + msg.generate_email_body(), ) proc.communicate(input=input_data.encode(charset)) finally: From adac07087423a8aecca20da56e156cd67d81d3f1 Mon Sep 17 00:00:00 2001 From: Fei Su Date: Mon, 29 Jan 2024 14:18:16 +0800 Subject: [PATCH 004/341] fix issue: json.load in python3 doesn't have 'encoding' paramteter Signed-off-by: Fei Su --- scripts/mail-alarm | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/mail-alarm b/scripts/mail-alarm index cc8a3419166..45e834d1c5e 100755 --- a/scripts/mail-alarm +++ b/scripts/mail-alarm @@ -120,8 +120,8 @@ def load_mail_language(mail_language): mail_language_file = os.path.join( 
mail_language_pack_path, mail_language + ".json" ) - with open(mail_language_file, "r") as fileh: - return json.load(fileh, encoding="utf-8") + with open(mail_language_file, encoding="utf-8") as fileh: + return json.load(fileh) except IOError: log_err('Read mail language pack error:["%s"]' % (mail_language_file)) return None @@ -1031,6 +1031,7 @@ def main(): config = config.replace(s, r) # Write out a temporary file containing the new config + temp_file_path = "" try: with tempfile.NamedTemporaryFile( prefix="mail-", dir="/tmp", delete=False From f42f23f9a60557c0f9b8cbb44166e47dcebbf8f1 Mon Sep 17 00:00:00 2001 From: Fei Su Date: Tue, 30 Jan 2024 09:08:43 +0800 Subject: [PATCH 005/341] add a conditiall branch for ensuring pass in python2 ut Signed-off-by: Fei Su --- scripts/mail-alarm | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/scripts/mail-alarm b/scripts/mail-alarm index 45e834d1c5e..5fd432339bf 100755 --- a/scripts/mail-alarm +++ b/scripts/mail-alarm @@ -120,8 +120,15 @@ def load_mail_language(mail_language): mail_language_file = os.path.join( mail_language_pack_path, mail_language + ".json" ) + + # this conditional branch won't be executed, it's solely for the purpose of ensuring pass in python2 ut. 
+ if sys.version_info.major == 2: + with open(mail_language_file, "r") as fileh: + return json.load(fileh, encoding="utf-8") + with open(mail_language_file, encoding="utf-8") as fileh: return json.load(fileh) + except IOError: log_err('Read mail language pack error:["%s"]' % (mail_language_file)) return None From 089ed2a08b9c516766a8f56144bc68300d702a82 Mon Sep 17 00:00:00 2001 From: Fei Su Date: Fri, 2 Feb 2024 10:48:27 +0800 Subject: [PATCH 006/341] Error: scripts/mail-alarm was changed, remove it from expected_to_fail in pyproject.toml and make sure it passes pytype checks Signed-off-by: Fei Su --- pyproject.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b65a36bb062..dc0221cd329 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,8 +77,6 @@ expected_to_fail = [ "scripts/backup-sr-metadata.py", "scripts/restore-sr-metadata.py", "scripts/nbd_client_manager.py", - # No attribute 'popen2' on module 'os' [module-attr] and a couple more: - "scripts/mail-alarm", # SSLSocket.send() only accepts bytes, not unicode string as argument: "scripts/examples/python/exportimport.py", # Other fixes needed: From afb29bf8c156f81e487f8398fe254450ddf2c40d Mon Sep 17 00:00:00 2001 From: Yann Dirson Date: Thu, 18 Jan 2024 17:59:37 +0100 Subject: [PATCH 007/341] py3: make xapi-storage py3-compatible This is a redo of 4140ff117038656da0e75a8387720b9c8401e9e0, not touching `str`. This uses the same mechanism as ac683ca9815f65ddc48bffdb4e242909387c3daa, to deal with occurrence of `long` causing `make test` to fail with python3, but also occurrences of `unicode`. 
Signed-off-by: Yann Dirson --- ocaml/xapi-storage/python/xapi/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index 0027af213bf..d2a0eed3f94 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -31,6 +31,11 @@ import json import argparse +# pylint: disable=invalid-name,redefined-builtin,undefined-variable +# pyright: reportUndefinedVariable=false +if sys.version_info[0] > 2: + long = int + unicode = str def success(result): return {"Status": "Success", "Value": result} From 75858d7bed6423fc88c435b7a4fd149e2148016e Mon Sep 17 00:00:00 2001 From: Yann Dirson Date: Thu, 18 Jan 2024 18:13:49 +0100 Subject: [PATCH 008/341] py3: make sure we are not using unicode type in python3 This is a redo of 48c8c3ec425c89afcd839b6a9c5b2bf7725de567, not touching `long` and `str`. `unicode` is only used in there for testing whether we have a string, so aliasing it to `str` is valid. OTOH we likely don't want to accept `bytes` where we accept `str` in python2, and `str` gets aliased to `bytes` in other areas of the code, so this might reveal issues in other places. 
Signed-off-by: Yann Dirson --- ocaml/xapi-storage/python/xapi/storage/api/datapath.py | 8 +++++++- ocaml/xapi-storage/python/xapi/storage/api/plugin.py | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py index 1d5b43b0dca..69b37e5a9e7 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py @@ -6,6 +6,12 @@ import argparse import traceback import logging + +# pylint: disable=invalid-name,redefined-builtin,undefined-variable +# pyright: reportUndefinedVariable=false +if sys.version_info[0] > 2: + unicode = str + class Unimplemented(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) @@ -398,4 +404,4 @@ def _dispatch(self, method, params): class datapath_server_test(datapath_server_dispatcher): """Create a server which will respond to all calls, returning arbitrary values. 
This is intended as a marshal/unmarshal test.""" def __init__(self): - datapath_server_dispatcher.__init__(self, Datapath_server_dispatcher(Datapath_test())) \ No newline at end of file + datapath_server_dispatcher.__init__(self, Datapath_server_dispatcher(Datapath_test())) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py index 0185d900148..1b6d37214ca 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py @@ -6,6 +6,12 @@ import argparse import traceback import logging + +# pylint: disable=invalid-name,redefined-builtin,undefined-variable +# pyright: reportUndefinedVariable=false +if sys.version_info[0] > 2: + unicode = str + class Unimplemented(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) @@ -230,4 +236,4 @@ def _dispatch(self, method, params): class plugin_server_test(plugin_server_dispatcher): """Create a server which will respond to all calls, returning arbitrary values. This is intended as a marshal/unmarshal test.""" def __init__(self): - plugin_server_dispatcher.__init__(self, Plugin_server_dispatcher(Plugin_test())) \ No newline at end of file + plugin_server_dispatcher.__init__(self, Plugin_server_dispatcher(Plugin_test())) From 7d33cfb1368570d5dc7ba0d6bd31355903c52c2e Mon Sep 17 00:00:00 2001 From: Yann Dirson Date: Thu, 18 Jan 2024 17:33:46 +0100 Subject: [PATCH 009/341] Switch xapi-storage-scripts tests to python3 With this a "make test" after build out of OPAM on Debian 12 finishes successfully. 
Signed-off-by: Yann Dirson --- .../test/volume/org.xen.xapi.storage.dummy/plugin.py | 2 +- .../test/volume/org.xen.xapi.storage.dummy/sr.py | 10 +++++----- .../test/volume/org.xen.xapi.storage.dummy/volume.py | 10 +++++----- .../test/volume/org.xen.xapi.storage.dummyv5/plugin.py | 2 +- .../test/volume/org.xen.xapi.storage.dummyv5/sr.py | 10 +++++----- .../test/volume/org.xen.xapi.storage.dummyv5/volume.py | 10 +++++----- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py index 08fb78407e0..40e3a00911c 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py index 3cd7a211c8f..82c77d891db 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. 
@@ -6,7 +6,7 @@ import os import sys -import urlparse +import urllib.parse import xapi.storage.api.volume import plugin @@ -21,11 +21,11 @@ def create(self, dbg, uri, name, description, configuration): return def detach(self, dbg, sr): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) return def ls(self, dbg, sr): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) qr = plugin.Implementation().query(dbg) return [{ "name": qr['name'], @@ -40,7 +40,7 @@ def ls(self, dbg, sr): }] def stat(self, dbg, sr): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) qr = plugin.Implementation().query(dbg) return { "sr": sr, diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py index 448ee6dcbc3..848c13bfd39 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py @@ -1,11 +1,11 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. 
""" import uuid -import urlparse +import urllib.parse import os import sys import xapi.storage.api.volume @@ -17,7 +17,7 @@ class Implementation(xapi.storage.api.volume.Volume_skeleton): def create(self, dbg, sr, name, description, size): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) voluuid = str(uuid.uuid4()) return { "name": name, @@ -32,11 +32,11 @@ def create(self, dbg, sr, name, description, size): } def destroy(self, dbg, sr, key): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) return def stat(self, dbg, sr, key): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) qr = plugin.Implementation().query(dbg) return { "name": qr['name'], diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py index 5816f0dd217..e9ef122ca07 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py index 6100407e91d..3c649423d15 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/sr.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. 
@@ -6,7 +6,7 @@ import os import sys -import urlparse +import urllib.parse import xapi.storage.api.v5.volume import plugin @@ -22,11 +22,11 @@ def create(self, dbg, uuid, configuration, name, description): return configuration def detach(self, dbg, sr): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) return def ls(self, dbg, sr): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) qr = plugin.Implementation().query(dbg) return [{ "name": qr['name'], @@ -42,7 +42,7 @@ def ls(self, dbg, sr): }] def stat(self, dbg, sr): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) qr = plugin.Implementation().query(dbg) return { "sr": sr, diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py index 20822dd8d73..fcf52ce3883 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/volume.py @@ -1,11 +1,11 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ Copyright (C) Citrix Systems, Inc. 
""" import uuid -import urlparse +import urllib.parse import os import sys import xapi.storage.api.v5.volume @@ -17,7 +17,7 @@ class Implementation(xapi.storage.api.v5.volume.Volume_skeleton): def create(self, dbg, sr, name, description, size, sharable): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) voluuid = str(uuid.uuid4()) return { "name": name, @@ -33,11 +33,11 @@ def create(self, dbg, sr, name, description, size, sharable): } def destroy(self, dbg, sr, key): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) return def stat(self, dbg, sr, key): - urlparse.urlparse(sr) + urllib.parse.urlparse(sr) qr = plugin.Implementation().query(dbg) return { "name": qr['name'], From af3a3e4314b9cb94760a6a7f66bf428ab316a045 Mon Sep 17 00:00:00 2001 From: Yann Dirson Date: Tue, 23 Jan 2024 18:15:27 +0100 Subject: [PATCH 010/341] Remove now-unused PY_TEST guard Reported-by: Pau Ruiz Safont Signed-off-by: Yann Dirson --- Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Makefile b/Makefile index bcfc5b9eb78..ba121000e3a 100644 --- a/Makefile +++ b/Makefile @@ -62,9 +62,7 @@ test: trap "kill $${PSTREE_SLEEP_PID}" SIGINT SIGTERM EXIT; \ timeout --foreground $(TEST_TIMEOUT2) \ dune runtest --profile=$(PROFILE) --error-reporting=twice -j $(JOBS) -ifneq ($(PY_TEST), NO) dune build @runtest-python --profile=$(PROFILE) -endif stresstest: dune build @stresstest --profile=$(PROFILE) --no-buffer -j $(JOBS) From b70c604e4f29dafc25b38a3d95cec7211e142e83 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 28 Feb 2024 01:36:47 +0000 Subject: [PATCH 011/341] CP-47935: Create a subdirectory for python3-only scripts insatlled in BIN Signed-off-by: Stephen Cheng --- .codecov.yml | 41 +++++++++++++++++++++++++-- .github/workflows/main.yml | 15 +++++++++- pyproject.toml | 6 ++-- python3/Makefile | 7 +++++ {scripts => python3/bin}/hfx_filename | 31 +++++++++++--------- scripts/Makefile | 1 - 6 files changed, 81 insertions(+), 20 deletions(-) create mode 100644 python3/Makefile 
rename {scripts => python3/bin}/hfx_filename (82%) diff --git a/.codecov.yml b/.codecov.yml index 8380434a2a5..9be7955160d 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -158,6 +158,31 @@ coverage: # threshold: 20% + python3: + + # + # The python3 limit applies to: + # ----------------------------- + # + # - python3/** + # - excluding: **/test_*.py + # + paths: ["python3/**", "!**/test_*.py"] + + # + # For python3/** (excluding tests): + # + # For python3, coverage should not be reduced compared to its base: + # + target: auto + + # + # Exception: the threshold value given is allowed + # + # Allows for not covering 20% if the changed lines of the PR: + # + threshold: 20% + # Checks each Python version separately: python-3.11: flags: ["python3.11"] @@ -175,18 +200,26 @@ coverage: # Python modules and scripts below scripts/ (excluding tests) # scripts: + paths: ["scripts/**", "!**/test_*.py"] target: 48% threshold: 2% - paths: ["scripts/**", "!**/test_*.py"] # - # Python modules and scripts below ocaml/ + # Python modules and scripts below ocaml/ (excluding tests) # ocaml: paths: ["ocaml/**", "!**/test_*.py"] target: 51% threshold: 3% + # + # Python modules and scripts below python3/ (excluding tests) + # + python3: + paths: ["python3/**", "!**/test_*.py"] + target: 48% + threshold: 2% + # # Test files # @@ -239,6 +272,10 @@ component_management: - "ocaml/xapi-storage-script/**" - "!**/test_*.py" + - component_id: python3 + name: python3 + paths: ["python3/**", "!**/test_*.py"] + - component_id: test_cases name: test_cases paths: ["**/test_*.py"] diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index cadf84c35c4..7b660722b20 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -56,7 +56,8 @@ jobs: - name: Install common dependencies for Python ${{matrix.python-version}} run: pip install future mock pytest-coverage pytest-mock - - name: Run Pytest and get code coverage for Codecov + - name: Run Pytest for python 2 and get 
code coverage for Codecov + if: ${{ matrix.python-version == '2.7' }} run: > pytest --cov=scripts --cov=ocaml/xcp-rrdd @@ -67,6 +68,18 @@ jobs: env: PYTHONDEVMODE: yes + - name: Run Pytest for python 3 and get code coverage for Codecov + if: ${{ matrix.python-version != '2.7' }} + run: > + pytest + --cov=scripts --cov=ocaml/xcp-rrdd --cov=python3/ + scripts/ ocaml/xcp-rrdd python3/ -vv -rA + --junitxml=.git/pytest${{matrix.python-version}}.xml + --cov-report term-missing + --cov-report xml:.git/coverage${{matrix.python-version}}.xml + env: + PYTHONDEVMODE: yes + - name: Upload Python ${{matrix.python-version}} coverage report to Codecov uses: codecov/codecov-action@v3 with: diff --git a/pyproject.toml b/pyproject.toml index dc0221cd329..8c902456c05 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,6 @@ discard_messages_matching = [ "No Node.TEXT_NODE in module xml.dom.minidom, referenced from 'xml.dom.expatbuilder'" ] expected_to_fail = [ - "scripts/hfx_filename", "scripts/perfmon", # Need 2to3 -w and maybe a few other minor updates: "scripts/hatests", @@ -96,7 +95,6 @@ expected_to_fail = [ [tool.pytype] inputs = [ - "scripts/hfx_filename", "scripts/perfmon", "scripts/static-vdis", "scripts/Makefile", @@ -112,6 +110,10 @@ inputs = [ "scripts/yum-plugins", "scripts/*.py", + # Python 3 + "python3/bin/hfx_filename", + "python3/*.py", + # To be added later, # when converted to Python3-compatible syntax: # "ocaml/message-switch/python", diff --git a/python3/Makefile b/python3/Makefile new file mode 100644 index 00000000000..6d0089bac98 --- /dev/null +++ b/python3/Makefile @@ -0,0 +1,7 @@ +include ../config.mk + +IPROG=install -m 755 + +install: + mkdir -p $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin \ No newline at end of file diff --git a/scripts/hfx_filename b/python3/bin/hfx_filename similarity index 82% rename from scripts/hfx_filename rename to python3/bin/hfx_filename index cea0f808200..dd8677fc499 100755 --- 
a/scripts/hfx_filename +++ b/python3/bin/hfx_filename @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2015 Citrix, Inc. # @@ -14,8 +14,8 @@ # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -from __future__ import print_function -import sys, os, socket, urllib2, urlparse, XenAPI, traceback, xmlrpclib + +import sys, socket, urllib.request, XenAPI db_url = "/remote_db_access" @@ -28,18 +28,20 @@ def rpc(session_id, request): headers = [ "POST %s?session_id=%s HTTP/1.0" % (db_url, session_id), "Connection:close", - "content-length:%d" % (len(request)), + "content-length:%d" % (len(request.encode('utf-8'))), "" ] - #print "Sending HTTP request:" for h in headers: - s.send("%s\r\n" % h) - #print "%s\r\n" % h, - s.send(request) + s.send((h + "\r\n").encode('utf-8')) + s.send(request.encode('utf-8')) + + result = "" + while True: + chunk = s.recv(1024) + if not chunk: + break + result += chunk.decode('utf-8') - result = s.recv(1024) - #print "Received HTTP response:" - #print result if "200 OK" not in result: print("Expected an HTTP 200, got %s" % result, file=sys.stderr) return @@ -55,13 +57,15 @@ def rpc(session_id, request): s.close() def parse_string(txt): + if not txt: + raise Exception("Unable to parse string response: None") prefix = "success" if not txt.startswith(prefix): - raise "Unable to parse string response" + raise Exception("Unable to parse string response: Wrong prefix") txt = txt[len(prefix):] suffix = "" if not txt.endswith(suffix): - raise "Unable to parse string response" + raise Exception("Unable to parse string response: Wrong suffix") txt = txt[:len(txt)-len(suffix)] return txt @@ -76,7 +80,6 @@ def read_field(session_id, table, fld, rf): return response if __name__ == "__main__": - import XenAPI xapi = XenAPI.xapi_local() xapi.xenapi.login_with_password('root', '') session_id = xapi._session diff --git a/scripts/Makefile 
b/scripts/Makefile index 8f07e91efe7..48aea975bf4 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -150,7 +150,6 @@ install: $(IPROG) xe-syslog-reconfigure $(DESTDIR)$(LIBEXECDIR) $(IPROG) xe-install-supplemental-pack $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-enable-ipv6 $(DESTDIR)$(OPTDIR)/bin - $(IPROG) hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) pv2hvm $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)/etc/cron.daily mkdir -p $(DESTDIR)/etc/cron.hourly From aec50b0dd15a0487123622bba10458b175a2d500 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 28 Feb 2024 04:39:42 +0000 Subject: [PATCH 012/341] CP-47935: Add unit tests for hfx_filename Signed-off-by: Stephen Cheng --- python3/unittest/import_file.py | 25 +++++ python3/unittest/test_hfx_filename.py | 132 ++++++++++++++++++++++++++ 2 files changed, 157 insertions(+) create mode 100644 python3/unittest/import_file.py create mode 100644 python3/unittest/test_hfx_filename.py diff --git a/python3/unittest/import_file.py b/python3/unittest/import_file.py new file mode 100644 index 00000000000..2589e640232 --- /dev/null +++ b/python3/unittest/import_file.py @@ -0,0 +1,25 @@ +""" +This file is used for importing a non-".py" file as a module in unit test. 
+It never runs directly, so no shebang and no main() +""" +import sys +import os +from importlib import machinery, util + +def import_from_file(module_name, file_path): + """Import a file as a module""" + loader = machinery.SourceFileLoader(module_name, file_path) + spec = util.spec_from_loader(module_name, loader) + assert spec + assert spec.loader + module = util.module_from_spec(spec) + # Probably a good idea to add manually imported module stored in sys.modules + sys.modules[module_name] = module + spec.loader.exec_module(module) + return module + +def get_module(module_name, file_path): + """get the module from a file""" + testdir = os.path.dirname(__file__) + print(testdir) + return import_from_file(module_name, testdir + file_path) diff --git a/python3/unittest/test_hfx_filename.py b/python3/unittest/test_hfx_filename.py new file mode 100644 index 00000000000..6e6964a24f2 --- /dev/null +++ b/python3/unittest/test_hfx_filename.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +This module provides unittest for hfx_filename +""" + +import sys +import unittest +from mock import MagicMock, patch, call +from import_file import get_module + +# mock modules to avoid dependencies +sys.modules["XenAPI"] = MagicMock() + +hfx_filename = get_module("hfx_filename", "/../bin/hfx_filename") + + +@patch("socket.socket") +class TestRpc(unittest.TestCase): + """ + This class tests blow functions: + rpc() + db_get_uuid() + read_field() + """ + def test_rpc(self, mock_socket): + """ + Tests rpc + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + recv_data = b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHelloWorld" + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + session_id = 0 + request = "socket request" + body = hfx_filename.rpc(session_id, request) + + # Assert that the socket methods were called as expected + expected_data = [ + b"POST 
/remote_db_access?session_id=0 HTTP/1.0\r\n", + b"Connection:close\r\n", + b"content-length:14\r\n", + b"\r\n", + b"socket request" + ] + mock_connected_socket.send.assert_has_calls([call(data) for data in expected_data]) + + expected_return = "HelloWorld" + self.assertEqual(expected_return, body) + + def test_rpc_international_character(self, mock_socket): + """ + Tests rpc using non-ascii characters + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + recv_data = b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHelloWorld" + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + session_id = 0 + # Use international character"socket 请求" as request + request = "socket 请求" + body = hfx_filename.rpc(session_id, request) + + # Assert that the socket methods were called as expected + expected_data = [ + b"POST /remote_db_access?session_id=0 HTTP/1.0\r\n", + b"Connection:close\r\n", + b"content-length:13\r\n", + b"\r\n", + request.encode('utf-8') + ] + mock_connected_socket.send.assert_has_calls([call(data) for data in expected_data]) + + expected_return = "HelloWorld" + self.assertEqual(expected_return, body) + + def test_db_get_uuid(self, mock_socket): + """ + Tests db_get_uuid + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + header = "HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n" + body = ("successHelloWorld" + "") + recv_data = (header + body).encode('utf-8') + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + expected_response = "HelloWorld" + response = hfx_filename.db_get_by_uuid(0, "pool_patch", "22345") + self.assertEqual(expected_response, response) + + def test_read_field(self, mock_socket): + """ + Tests read_field + """ + mock_connected_socket = MagicMock() + mock_socket.return_value = mock_connected_socket + + header = "HTTP/1.1 200 
OK\r\nContent-Length: 10\r\n\r\n" + body = ("successfile_name" + "") + recv_data = (header + body).encode('utf-8') + # Set the return value for the first call to recv + mock_connected_socket.recv.side_effect = [recv_data, None] + + expected_filename = "file_name" + filename = hfx_filename.read_field(0, "pool_patch", "filename", "rf") + self.assertEqual(expected_filename, filename) + + +class TestParse(unittest.TestCase): + """ + This class tests function parse_string() + """ + def test_parse_string(self): + """ + Tests parse_string + """ + txt = ("successabcde" + "") + expected_txt = "abcde" + return_txt = hfx_filename.parse_string(txt) + self.assertEqual(expected_txt, return_txt) From 60dbc3c8340ee0320b82e93cc94583f91d2298d7 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 8 Mar 2024 03:13:19 +0000 Subject: [PATCH 013/341] Set the unit test codcov target to 80% There were no python3 unit tests now. Set the unit test coverage to 80% for the py3 update. In the future, if there are cases where the scripts are not UTable or we can cover them by manual test or XenRT test, we can handle it by excluding them from the check. 
Signed-off-by: Stephen Cheng --- .codecov.yml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 9be7955160d..f7562dbaf3c 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -168,13 +168,7 @@ coverage: # - excluding: **/test_*.py # paths: ["python3/**", "!**/test_*.py"] - - # - # For python3/** (excluding tests): - # - # For python3, coverage should not be reduced compared to its base: - # - target: auto + target: 80% # # Exception: the threshold value given is allowed @@ -278,4 +272,5 @@ component_management: - component_id: test_cases name: test_cases - paths: ["**/test_*.py"] + paths: ["python3/unittest/test_*.py"] + From 10e9c37b5f170bde28e8630dc46b143090f0d25f Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 11 Mar 2024 07:24:02 +0000 Subject: [PATCH 014/341] Only test files migrated to python3 Signed-off-by: Stephen Cheng --- .codecov.yml | 4 +--- .github/workflows/main.yml | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index f7562dbaf3c..47ef46ac090 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -180,8 +180,6 @@ coverage: # Checks each Python version separately: python-3.11: flags: ["python3.11"] - python-2.7: - flags: ["python2.7"] # # Project limits @@ -220,7 +218,7 @@ coverage: tests: # Ensure that all tests are executed (tests themselves must be 100% covered) target: 98% - paths: ["**/test_*.py"] + paths: ["python3/unittest/test_*.py"] # diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d38a825a23b..e8a6b84cbfb 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -72,8 +72,8 @@ jobs: if: ${{ matrix.python-version != '2.7' }} run: > pytest - --cov=scripts --cov=ocaml/xcp-rrdd --cov=python3/ - scripts/ ocaml/xcp-rrdd python3/ -vv -rA + --cov=python3/unittest + python3/unittest -vv -rA --junitxml=.git/pytest${{matrix.python-version}}.xml --cov-report term-missing --cov-report 
xml:.git/coverage${{matrix.python-version}}.xml From 9b73950b01641eec54e098858c3f06cd715d50ae Mon Sep 17 00:00:00 2001 From: acefei Date: Wed, 13 Mar 2024 13:52:22 +0800 Subject: [PATCH 015/341] CP-47555 Porting usb_scan.py to python3 (#5424) * formate with black * decode value produced by python3-pyudev which return bytes * remove usb_scan from expected_to_fail list in pyproject.toml * update some neccesary pylint issues * fix pytype error: unsupported operand type(s) for +: str and UsbInterface * Disable false positive in Pytype error reporting * update for comments * fix ut errors as python-pyudev return bytes instead of string * increase code coverage * format with black for test_usb_scan.py * move usb_scan.py with ut code into python3 folder * Disable the code coverage of scripts folder We're moving the python code and unittest into python3 folder, that would be resulting in a decrease in coverage rate in scripts folder * false positive for code coverage * solve the pylint warning --------- Signed-off-by: Fei Su --- .github/workflows/main.yml | 2 +- pyproject.toml | 18 +- python3/Makefile | 2 + {scripts => python3/libexec}/usb_scan.py | 207 +++++++++-------- python3/unittest/import_file.py | 2 +- python3/unittest/test_hfx_filename.py | 2 +- .../unittest}/test_usb_scan.py | 208 +++++++++--------- scripts/Makefile | 2 - 8 files changed, 234 insertions(+), 209 deletions(-) rename {scripts => python3/libexec}/usb_scan.py (79%) rename {scripts => python3/unittest}/test_usb_scan.py (66%) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e8a6b84cbfb..a51f40e91e6 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -60,7 +60,7 @@ jobs: if: ${{ matrix.python-version == '2.7' }} run: > pytest - --cov=scripts --cov=ocaml/xcp-rrdd + --cov=ocaml/xcp-rrdd scripts/ ocaml/xcp-rrdd -vv -rA --junitxml=.git/pytest${{matrix.python-version}}.xml --cov-report term-missing diff --git a/pyproject.toml b/pyproject.toml index 
8ab98205e89..afc1ff32067 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,20 @@ profile = "black" combine_as_imports = true ensure_newline_before_comments = false +[tool.pylint.messages_control] +disable = [ + "missing-function-docstring", + "missing-module-docstring", + "consider-using-f-string", + "too-many-branches", + "broad-exception-caught", + "no-else-break", + "no-else-return", + "invalid-name", + "import-error", + "unnecessary-pass", + "unspecified-encoding", +] [tool.mypy] # Note mypy has no config setting for PYTHONPATH, so you need to call it with: @@ -85,10 +99,6 @@ expected_to_fail = [ "scripts/examples/python/shell.py", "scripts/examples/smapiv2.py", "scripts/static-vdis", - # add_interface: unsupported operand type(s) for +: str and UsbInterface - "scripts/usb_scan.py", - # TestUsbScan.assertIn() is called with wrong arguments(code not iterable) - "scripts/test_usb_scan.py", "scripts/plugins/extauth-hook-AD.py", ] diff --git a/python3/Makefile b/python3/Makefile index a8b6ad3d0b5..02d819443ed 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -8,6 +8,8 @@ SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; pr install: mkdir -p $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(SITE3_DIR) + mkdir -p $(DESTDIR)$(LIBEXECDIR) $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ + $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) diff --git a/scripts/usb_scan.py b/python3/libexec/usb_scan.py similarity index 79% rename from scripts/usb_scan.py rename to python3/libexec/usb_scan.py index 25290b362a9..187418741e6 100755 --- a/scripts/usb_scan.py +++ b/python3/libexec/usb_scan.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright (C) Citrix Systems Inc. # @@ -21,16 +21,17 @@ # 2. check if device can be passed through based on policy file # 3. 
return the device info to XAPI in json format -from __future__ import print_function + import abc import argparse import json -import xcp.logger as log import logging -import pyudev import re import sys +import pyudev +import xcp.logger as log + def log_list(l): for s in l: @@ -43,7 +44,7 @@ def log_exit(m): def hex_equal(h1, h2): - """ check if the value of hex string are equal + """check if the value of hex string are equal :param h1:(str) lhs hex string :param h2:(str) rhs hex string @@ -56,14 +57,15 @@ def hex_equal(h1, h2): class UsbObject(dict): - """ Base class of USB classes, save USB properties in dict + """Base class of USB classes, save USB properties in dict node(str): the key, device node """ + __metaclass__ = abc.ABCMeta def __init__(self, node): - super(UsbObject, self).__init__() + super().__init__() self.node = node def get_node(self): @@ -90,11 +92,12 @@ def debug_str(self, level=0): :param level: the indent level :return: the debug string """ - return self.indent(level) + self.__class__.__name__ + ": " + \ - str((self.node, self)) + return ( + self.indent(level) + self.__class__.__name__ + ": " + str((self.node, self)) + ) def is_initialized(self): - """ check if all properties are properly set + """check if all properties are properly set :return: bool, if properties are ready """ @@ -107,24 +110,22 @@ def _is_class_hub(self, key_class): @abc.abstractmethod def is_class_hub(self): - """ check if this belongs to a hub + """check if this belongs to a hub :return: bool, if this belongs to a hub """ - pass @abc.abstractmethod def is_child_of(self, parent): - """ check if this is a child of parent + """check if this is a child of parent :param parent:(UsbObject) the parent to check against :return: """ - pass @staticmethod def validate_int(s, base=10): - """ validate if a string can be converted to int + """validate if a string can be converted to int :param s:(str) the string to be converted :param base:(int) the radix base of integer to convect @@ 
-138,10 +139,11 @@ def validate_int(s, base=10): class UsbDevice(UsbObject): - """ Class for USB device, save USB properties in UsbObject dict + """Class for USB device, save USB properties in UsbObject dict interfaces:([UsbInterface]) list of USB interfaces belonging to this device """ + _DESC_VENDOR = "ID_VENDOR_FROM_DATABASE" _DESC_PRODUCT = "ID_MODEL_FROM_DATABASE" @@ -156,13 +158,22 @@ class UsbDevice(UsbObject): _USB_SPEED = "speed" _PRODUCT_DESC = [_DESC_VENDOR, _DESC_PRODUCT] - _PRODUCT_DETAILS = [_VERSION, _ID_VENDOR, _ID_PRODUCT, _BCD_DEVICE, _SERIAL, - _CLASS, _CONF_VALUE, _NUM_INTERFACES, _USB_SPEED] + _PRODUCT_DETAILS = [ + _VERSION, + _ID_VENDOR, + _ID_PRODUCT, + _BCD_DEVICE, + _SERIAL, + _CLASS, + _CONF_VALUE, + _NUM_INTERFACES, + _USB_SPEED, + ] _PROPS = _PRODUCT_DESC + _PRODUCT_DETAILS _PROPS_NONABLE = _PRODUCT_DESC + [_SERIAL] def __init__(self, node, props1, props2): - """ initialise UsbDevice, set node and properties + """initialise UsbDevice, set node and properties :param node(str): device node :param props1(pyudev.Device): device, to get properties from UDEV @@ -170,14 +181,14 @@ def __init__(self, node, props1, props2): :param props2(pyudev.Device.attributes): device attributes, to get properties from sysfs """ - super(UsbDevice, self).__init__(node) + super().__init__(node) for p in self._PRODUCT_DESC: if props1.get(p) is not None: self[p] = props1.get(p) for p in self._PRODUCT_DETAILS: if props2.get(p) is not None: - self[p] = props2.get(p) + self[p] = props2.get(p).decode() for p in self._PROPS_NONABLE: if p not in self: self[p] = "" @@ -185,7 +196,7 @@ def __init__(self, node, props1, props2): self.interfaces = set() def debug_str(self, level=0): - s = super(UsbDevice, self).debug_str(level) + s = super().debug_str(level) for i in self.interfaces: s += i.debug_str(level + 1) return s @@ -203,7 +214,7 @@ def is_initialized(self): if not self.validate_int(self[p]): return False - return super(UsbDevice, self).is_initialized() + return 
super().is_initialized() def is_class_hub(self): return self._is_class_hub(self._CLASS) @@ -213,13 +224,13 @@ def is_child_of(self, parent): return False def add_interface(self, interface): - """ add an interface to this device + """add an interface to this device :param interface:(UsbInterface) the UsbInterface to add :return: None """ if interface in self.interfaces: - log.debug("overriding existing interface: " + interface) + log.debug("overriding existing interface: " + str(interface)) self.interfaces.remove(interface) self.interfaces.add(interface) @@ -230,18 +241,18 @@ def del_interface(self, interface): :return: None """ if interface in self.interfaces: - log.debug("removing interface: " + interface) + log.debug("removing interface: " + str(interface)) self.interfaces.remove(interface) def get_all_interfaces(self): - """ get all interfaces attached of this device + """get all interfaces attached of this device :return: set of all interfaces """ return self.interfaces def is_ready(self): - """ check if this device has all the interfaces attached + """check if this device has all the interfaces attached :return: bool, if it's ready to do policy check now """ @@ -250,9 +261,8 @@ def is_ready(self): class UsbInterface(UsbObject): - """ Class for USB interface, save USB properties in UsbObject dict + """Class for USB interface, save USB properties in UsbObject dict""" - """ _NUMBER = "bInterfaceNumber" _CLASS = "bInterfaceClass" _SUB_CLASS = "bInterfaceSubClass" @@ -261,20 +271,19 @@ class UsbInterface(UsbObject): _PROPS = [_NUMBER, _CLASS, _SUB_CLASS, _PROTOCOL] def __init__(self, node, props): - """ initialise UsbInterface, set node and properties + """initialise UsbInterface, set node and properties :param node(str): device node :param props(pyudev.Device.attributes): device attributes, to get properties from sysfs """ - super(UsbInterface, self).__init__(node) + super().__init__(node) for p in self._PROPS: if props.get(p) is not None: - self[p] = props.get(p) 
+ self[p] = props.get(p).decode() def debug_str(self, level=0): - s = super(UsbInterface, self).debug_str(level) - return s + return super().debug_str(level) def is_class_hub(self): return self._is_class_hub(self._CLASS) @@ -287,13 +296,14 @@ def is_initialized(self): for p in self._PROPS: if p not in self or not self.validate_int(self[p], 16): return False - return super(UsbInterface, self).is_initialized() + return super().is_initialized() def is_child_of(self, parent): if isinstance(parent, UsbDevice) and parent.is_initialized(): conf_value = parent[UsbDevice._CONF_VALUE] - pattern = r"^{}:{}\.\d+$".format(re.escape(parent.get_node()), - re.escape(conf_value)) + pattern = r"^{}:{}\.\d+$".format( + re.escape(parent.get_node()), re.escape(conf_value) + ) return re.match(pattern, self.get_node()) is not None return False @@ -318,14 +328,15 @@ def get_usb_info(): return devices, interfaces -class Policy(object): - """ Parse policy file, and check if a UsbDevice can be passed through +class Policy: + """Parse policy file, and check if a UsbDevice can be passed through Policy file spec reference: https://support.citrix.com/article/CTX119722 rule_list: the list of parsed rule """ + _PATH = "/etc/xensource/usb-policy.conf" _CLASS = "class" @@ -336,36 +347,40 @@ class Policy(object): _BCD_DEVICE = "rel" # key in policy <--> key in usb device - _KEY_MAP_DEVICE = {_ID_VENDOR: UsbDevice._ID_VENDOR, - _ID_PRODUCT: UsbDevice._ID_PRODUCT, - _BCD_DEVICE: UsbDevice._BCD_DEVICE} + _KEY_MAP_DEVICE = { + _ID_VENDOR: UsbDevice._ID_VENDOR, # pylint: disable=protected-access + _ID_PRODUCT: UsbDevice._ID_PRODUCT, # pylint: disable=protected-access + _BCD_DEVICE: UsbDevice._BCD_DEVICE, # pylint: disable=protected-access + } # key in policy <--> key in usb interface - _KEY_MAP_INTERFACE = {_CLASS: UsbInterface._CLASS, - _SUBCLASS: UsbInterface._SUB_CLASS, - _PROTOCOL: UsbInterface._PROTOCOL} - - _PAT_KEY = r"\s*({}|{}|{}|{}|{}|{})\s*".format(_CLASS, _SUBCLASS, - _PROTOCOL, _ID_VENDOR, - 
_ID_PRODUCT, _BCD_DEVICE) + _KEY_MAP_INTERFACE = { + _CLASS: UsbInterface._CLASS, # pylint: disable=protected-access + _SUBCLASS: UsbInterface._SUB_CLASS, # pylint: disable=protected-access + _PROTOCOL: UsbInterface._PROTOCOL, # pylint: disable=protected-access + } + + _PAT_KEY = r"\s*({}|{}|{}|{}|{}|{})\s*".format( + _CLASS, _SUBCLASS, _PROTOCOL, _ID_VENDOR, _ID_PRODUCT, _BCD_DEVICE + ) _PATTERN = r"{}=\s*([0-9a-f]+)".format(_PAT_KEY) _ALLOW = "allow" def __init__(self): - """ parse policy file, generate rule list + """parse policy file, generate rule list Note: hubs are never allowed to pass through """ self.rule_list = [] try: - with open(self._PATH, "r") as f: + with open(self._PATH) as f: log.debug("=== policy file begin") for line in f: log.debug(line[0:-1]) self.parse_line(line) log.debug("=== policy file end") - except IOError as e: + except OSError as e: # without policy file, no device will be allowed to passed through log_exit("Caught error {}, policy file error".format(str(e))) @@ -375,19 +390,21 @@ def __init__(self): def check_hex_length(self, name, value): if name in [self._CLASS, self._SUBCLASS, self._PROTOCOL]: - return 2 == len(value) + return len(value) == 2 if name in [self._ID_VENDOR, self._ID_PRODUCT, self._BCD_DEVICE]: - return 4 == len(value) + return len(value) == 4 return False @staticmethod def parse_error(pos, end, target, line): log_exit( - "Malformed policy rule, unable to parse '{}', malformed line: {}" - .format(target[pos:end], line)) + "Malformed policy rule, unable to parse '{}', malformed line: {}".format( + target[pos:end], line + ) + ) def parse_line(self, line): - """ parse one line of policy file, generate rule, and append it to + """parse one line of policy file, generate rule, and append it to self.rule_list Example: @@ -413,13 +430,10 @@ def parse_line(self, line): # 2. 
split action and match field # ^\s*(ALLOW|DENY)\s*:\s*([^:]*)$ try: - action, target = [part.strip() for part in line.split(":")] + action, target = (part.strip() for part in line.split(":")) except ValueError as e: if line.rstrip(): - log_exit("Caught error {}, malformed line: {}" - .format(str(e), line)) - # empty line, just return - return + log_exit("Caught error {}, malformed line: {}".format(str(e), line)) # 3. parse action # \s*(ALLOW|DENY)\s* @@ -429,37 +443,39 @@ def parse_line(self, line): elif action.lower() == "deny": rule[self._ALLOW] = False else: - log_exit("Malformed action'{}', malformed line: {}".format( - action, line)) + log_exit("Malformed action'{}', malformed line: {}".format(action, line)) # 4. parse key=value pairs # pattern = r"\s*(class|subclass|prot|vid|pid|rel)\s*=\s*([0-9a-f]+)" last_end = 0 - for matchNum, match in enumerate(re.finditer(self._PATTERN, target, - re.IGNORECASE)): - if last_end != match.start(): - self.parse_error(last_end, match.start(), target, line) + name = "" + value = "" + for m in re.finditer(self._PATTERN, target, re.IGNORECASE): + if last_end != m.start(): + self.parse_error(last_end, m.start(), target, line) try: - name, value = [part.lower() for part in match.groups()] + name, value = (part.lower() for part in m.groups()) # This can happen if `part` is None except AttributeError: - self.parse_error(match.start(), match.end(), target, line) + self.parse_error(m.start(), m.end(), target, line) # This should never happen, because the regexp has exactly two # matching groups except ValueError: - self.parse_error(match.start(), match.end(), target, line) + self.parse_error(m.start(), m.end(), target, line) if not self.check_hex_length(name, value): - log_exit("hex'{}' length error, malformed line {}".format( - str(value), line)) + log_exit( + "hex'{}' length error, malformed line {}".format(str(value), line) + ) if name in rule: - log_exit("duplicated tag'{}' found, malformed line {}". 
- format(name, line)) + log_exit( + "duplicated tag'{}' found, malformed line {}".format(name, line) + ) rule[name] = value - last_end = match.end() + last_end = m.end() if last_end != len(target): self.parse_error(last_end, len(target) + 1, target, line) @@ -477,14 +493,20 @@ def match_device_interface(self, rule, device, interface): :return:(bool) if they match """ for k in [k for k in rule if k in self._KEY_MAP_DEVICE]: - log.debug("check {} props[{}] against {}".format( - interface.get_node(), k, str(rule))) + log.debug( + "check {} props[{}] against {}".format( + interface.get_node(), k, str(rule) + ) + ) if not hex_equal(rule[k], device[self._KEY_MAP_DEVICE[k]]): return False for k in [k for k in rule if k in self._KEY_MAP_INTERFACE]: - log.debug("check {} props[{}] against {}".format( - interface.get_node(), k, str(rule))) + log.debug( + "check {} props[{}] against {}".format( + interface.get_node(), k, str(rule) + ) + ) if not hex_equal(rule[k], interface[self._KEY_MAP_INTERFACE[k]]): return False @@ -549,16 +571,19 @@ def check(self, device): def parse_args(): - parser = argparse.ArgumentParser( - description="scanner to get USB devices info") - parser.add_argument("-d", "--diagnostic", dest="diagnostic", - action="store_true", - help="enable diagnostic mode") + parser = argparse.ArgumentParser(description="scanner to get USB devices info") + parser.add_argument( + "-d", + "--diagnostic", + dest="diagnostic", + action="store_true", + help="enable diagnostic mode", + ) return parser.parse_args() def to_pusb(device): - """ convert UsbDevice to pusb dict + """convert UsbDevice to pusb dict Example pusb dict: [ @@ -612,7 +637,7 @@ def to_pusb(device): def make_pusbs_list(devices, interfaces): - """ check the USB devices and interfaces against policy file, + """check the USB devices and interfaces against policy file, and return the pusb list that can be passed through :param devices:([UsbDevice]) USB device list we found in host @@ -633,7 +658,7 @@ def 
make_pusbs_list(devices, interfaces): return [to_pusb(d) for d in devices if d.is_ready() and policy.check(d)] -if __name__ == "__main__": +if __name__ == "__main__": # pragma: no cover args = parse_args() if args.diagnostic: log.logToSyslog(level=logging.DEBUG) @@ -643,8 +668,8 @@ def make_pusbs_list(devices, interfaces): # get usb info try: devices, interfaces = get_usb_info() - except Exception as e: - log_exit("Failed to get usb info: {}".format(str(e))) + except Exception as ex: + log_exit("Failed to get usb info: {}".format(str(ex))) # debug info log_list(devices) diff --git a/python3/unittest/import_file.py b/python3/unittest/import_file.py index 2589e640232..581f8f4b401 100644 --- a/python3/unittest/import_file.py +++ b/python3/unittest/import_file.py @@ -22,4 +22,4 @@ def get_module(module_name, file_path): """get the module from a file""" testdir = os.path.dirname(__file__) print(testdir) - return import_from_file(module_name, testdir + file_path) + return import_from_file(module_name, "{}/{}".format(testdir, file_path)) diff --git a/python3/unittest/test_hfx_filename.py b/python3/unittest/test_hfx_filename.py index 6e6964a24f2..0fc4f5abba3 100644 --- a/python3/unittest/test_hfx_filename.py +++ b/python3/unittest/test_hfx_filename.py @@ -12,7 +12,7 @@ # mock modules to avoid dependencies sys.modules["XenAPI"] = MagicMock() -hfx_filename = get_module("hfx_filename", "/../bin/hfx_filename") +hfx_filename = get_module("hfx_filename", "../bin/hfx_filename") @patch("socket.socket") diff --git a/scripts/test_usb_scan.py b/python3/unittest/test_usb_scan.py similarity index 66% rename from scripts/test_usb_scan.py rename to python3/unittest/test_usb_scan.py index c64d89d8276..150cc16afba 100644 --- a/scripts/test_usb_scan.py +++ b/python3/unittest/test_usb_scan.py @@ -2,21 +2,22 @@ # # unittest for usb_scan.py -try: - from collections.abc import Mapping, Container, Iterable -except ImportError: # python2 - from collections import Mapping, Container, Iterable 
-import mock import os import shutil import sys import tempfile import unittest +from collections.abc import Mapping + +import mock +from import_file import get_module + def nottest(obj): obj.__test__ = False return obj + sys.modules["xcp"] = mock.Mock() sys.modules["xcp.logger"] = mock.Mock() sys.modules["pyudev"] = mock.Mock() @@ -26,11 +27,11 @@ class MocDeviceAttrs(Mapping): def __init__(self, device): self.d = device.get_attr() - def __iter__(self): + def __iter__(self): # pragma: no cover for name in self.d: yield name - def __len__(self): + def __len__(self): # pragma: no cover return len(self.d) def __getitem__(self, name): @@ -38,7 +39,6 @@ def __getitem__(self, name): class MocDevice(Mapping): - def __init__(self, d): self.d = d @@ -56,11 +56,11 @@ def get_attr(self): def attributes(self): return MocDeviceAttrs(self) - def __iter__(self): + def __iter__(self): # pragma: no cover for name in self.get_prop(): yield name - def __len__(self): + def __len__(self): # pragma: no cover return len(self.get_prop()) def __getitem__(self, name): @@ -68,7 +68,6 @@ def __getitem__(self, name): class MocEnumerator(object): - def __init__(self, ds): self.ds = ds @@ -78,17 +77,16 @@ def __iter__(self): class MocContext(object): - def __init__(self, devices, interfaces): self.devices = devices self.interfaces = interfaces def list_devices(self, **kwargs): - if "usb" == kwargs.pop("subsystem"): + if kwargs.pop("subsystem") == "usb": dev_type = kwargs.pop("DEVTYPE") - if "usb_device" == dev_type: + if dev_type == "usb_device": return MocEnumerator(self.devices) - elif "usb_interface" == dev_type: + elif dev_type == "usb_interface": return MocEnumerator(self.interfaces) return MocEnumerator([]) @@ -97,8 +95,7 @@ def mock_setup(mod, devices, interfaces, path): mod.log.error = test_log mod.log.debug = test_log mod.Policy._PATH = path - mod.pyudev.Context = mock.Mock(return_value=MocContext( - devices, interfaces)) + mod.pyudev.Context = 
mock.Mock(return_value=MocContext(devices, interfaces)) @nottest @@ -107,65 +104,70 @@ def test_log(m): class TestUsbScan(unittest.TestCase): - def setUp(self): - try: - self.work_dir = tempfile.mkdtemp(prefix="test_usb_scan") - except: - raise + self.work_dir = tempfile.mkdtemp(prefix="test_usb_scan") def tearDown(self): shutil.rmtree(self.work_dir, ignore_errors=True) @nottest - def test_usb_common(self, moc_devices, moc_interfaces, moc_results, - path="./scripts/usb-policy.conf"): - import usb_scan + def test_usb_common( + self, moc_devices, moc_interfaces, moc_results, path="./scripts/usb-policy.conf" + ): + usb_scan = get_module("usb_scan", "../libexec/usb_scan.py") + mock_setup(usb_scan, moc_devices, moc_interfaces, path) devices, interfaces = usb_scan.get_usb_info() + usb_scan.log_list(devices) + usb_scan.log_list(interfaces) + pusbs = usb_scan.make_pusbs_list(devices, interfaces) # pass pusbs in json to XAPI self.assertEqual(sorted(pusbs), sorted(moc_results)) @nottest - def test_usb_exit(self, devices, interfaces, results, - path="./scripts/usb-policy.conf", msg=""): + def test_usb_exit( + self, devices, interfaces, results, + path="./scripts/usb-policy.conf", + msg="" + ): # pylint: disable=too-many-arguments with self.assertRaises(SystemExit) as cm: self.test_usb_common(devices, interfaces, results, path) if msg: - self.assertIn(msg, cm.exception.code) + # cm.exception.code is int type whose format + # looks like "duplicated tag'vid' found, + # malformed line ALLOW:vid=056a vid=0314 class=03" + self.assertIn(msg, cm.exception.code) # pytype: disable=wrong-arg-types def test_usb_dongle(self): devices = [ { "name": "1-2", - "props": { - "ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc." 
- }, + "props": {"ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc."}, "attrs": { - "idVendor": "096e", - "bNumInterfaces": " 1", - "bConfigurationValue": "1", - "bcdDevice": "010a", - "version": " 1.10", - "idProduct": "0302", - "bDeviceClass": "00", - "speed": "480" - } + "idVendor": b"096e", + "bNumInterfaces": b" 1", + "bConfigurationValue": b"1", + "bcdDevice": b"010a", + "version": b" 1.10", + "idProduct": b"0302", + "bDeviceClass": b"00", + "speed": b"480", + }, } ] interfaces = [ { "name": "1-2:1.0", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "00", - "bInterfaceProtocol": "00", - "bInterfaceNumber": "00", - } + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"00", + "bInterfaceProtocol": b"00", + "bInterfaceNumber": b"00", + }, } ] results = [ @@ -178,7 +180,7 @@ def test_usb_dongle(self): "vendor-id": "096e", "path": "1-2", "serial": "", - "speed": "480" + "speed": "480", } ] self.test_usb_common(devices, interfaces, results) @@ -187,30 +189,28 @@ def test_usb_dongle_on_hub(self): devices = [ { "name": "1-2.1", - "props": { - "ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc." 
- }, + "props": {"ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc."}, "attrs": { - "idVendor": "096e", - "bNumInterfaces": " 1", - "bConfigurationValue": "1", - "bcdDevice": "010a", - "version": " 1.10", - "idProduct": "0302", - "bDeviceClass": "00", - "speed": "12" - } + "idVendor": b"096e", + "bNumInterfaces": b" 1", + "bConfigurationValue": b"1", + "bcdDevice": b"010a", + "version": b" 1.10", + "idProduct": b"0302", + "bDeviceClass": b"00", + "speed": b"12", + }, } ] interfaces = [ { "name": "1-2.1:1.0", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "00", - "bInterfaceProtocol": "00", - "bInterfaceNumber": "00", - } + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"00", + "bInterfaceProtocol": b"00", + "bInterfaceNumber": b"00", + }, } ] results = [ @@ -223,7 +223,7 @@ def test_usb_dongle_on_hub(self): "vendor-id": "096e", "path": "1-2.1", "serial": "", - "speed": "12" + "speed": "12", } ] self.test_usb_common(devices, interfaces, results) @@ -232,66 +232,59 @@ def test_usb_dongle_unbinded(self): devices = [ { "name": "1-2", - "props": { - "ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc." - }, + "props": {"ID_VENDOR_FROM_DATABASE": "Feitian Technologies, Inc."}, "attrs": { - "idVendor": "096e", - "bNumInterfaces": "", - "bConfigurationValue": "", - "bcdDevice": "010a", - "version": " 1.10", - "idProduct": "0302", - "bDeviceClass": "00", - } + "idVendor": b"096e", + "bNumInterfaces": b"", + "bConfigurationValue": b"", + "bcdDevice": b"010a", + "version": b" 1.10", + "idProduct": b"0302", + "bDeviceClass": b"00", + }, } ] - interfaces = [ - ] - results = [ - ] + interfaces = [] + results = [] self.test_usb_common(devices, interfaces, results) def test_usb_keyboard(self): devices = [ { "name": "1-2", - "props": { - "ID_VENDOR_FROM_DATABASE": "Dell Computer Corp." 
- }, + "props": {"ID_VENDOR_FROM_DATABASE": "Dell Computer Corp."}, "attrs": { - "idVendor": "413c", - "bNumInterfaces": " 2", - "bConfigurationValue": "1", - "bcdDevice": "0110", - "version": " 2.00", - "idProduct": "2113", - "bDeviceClass": "00", - } + "idVendor": b"413c", + "bNumInterfaces": b" 2", + "bConfigurationValue": b"1", + "bcdDevice": b"0110", + "version": b" 2.00", + "idProduct": b"2113", + "bDeviceClass": b"00", + }, } ] interfaces = [ { "name": "1-2:1.0", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "01", - "bInterfaceProtocol": "01", - "bInterfaceNumber": "00", - } + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"01", + "bInterfaceProtocol": b"01", + "bInterfaceNumber": b"00", + }, }, { "name": "1-2:1.1", "attrs": { - "bInterfaceClass": "03", - "bInterfaceSubClass": "00", - "bInterfaceProtocol": "00", - "bInterfaceNumber": "01", - } - } - ] - results = [ + "bInterfaceClass": b"03", + "bInterfaceSubClass": b"00", + "bInterfaceProtocol": b"00", + "bInterfaceNumber": b"01", + }, + }, ] + results = [] self.test_usb_common(devices, interfaces, results) def test_usb_config_missing(self): @@ -309,8 +302,7 @@ def test_usb_config_error_unexpected_chars_with_comment(self): ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, - "to unpack") + self.test_usb_config_error_common(content, "to unpack") def test_usb_config_error_duplicated_key(self): content = """# duplicated key word @@ -377,13 +369,11 @@ def test_usb_config_error_unexpected_non_empty_line(self): aa ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, - "to unpack") + self.test_usb_config_error_common(content, "to unpack") def test_usb_config_error_missing_colon(self): content = """# missing colon after action ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW # Otherwise allow everything else """ - 
self.test_usb_config_error_common(content, - "to unpack") + self.test_usb_config_error_common(content, "to unpack") diff --git a/scripts/Makefile b/scripts/Makefile index 83b5526780a..d5984d927a7 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -110,7 +110,6 @@ install: $(IPROG) upload-wrapper logs-download $(DESTDIR)$(LIBEXECDIR) $(IDATA) usb-policy.conf $(DESTDIR)$(ETCXENDIR) $(IPROG) usb_reset.py $(DESTDIR)$(LIBEXECDIR) - $(IPROG) usb_scan.py $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(OPTDIR)/packages/iso #omg XXX $(IPROG) xapi-rolling-upgrade-miami $(DESTDIR)$(LIBEXECDIR)/xapi-rolling-upgrade $(IPROG) set-hostname $(DESTDIR)$(LIBEXECDIR) @@ -195,4 +194,3 @@ endif $(IDATA) mail-languages/ja-JP.json $(DESTDIR)/etc/xapi.d/mail-languages # uefi mkdir -p $(DESTDIR)/etc/xapi.d/efi-clone - From aac47d37a1841f249ac7100ffb62e9d93b44b740 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 4 Mar 2024 05:52:09 +0000 Subject: [PATCH 016/341] CP-47334: Move nbd_client_manager.py to python3/libexec Signed-off-by: Stephen Cheng --- python3/Makefile | 3 ++- {scripts => python3/libexec}/nbd_client_manager.py | 0 python3/unittest/test_nbd_client_manager.py | 0 scripts/Makefile | 1 - 4 files changed, 2 insertions(+), 2 deletions(-) rename {scripts => python3/libexec}/nbd_client_manager.py (100%) create mode 100644 python3/unittest/test_nbd_client_manager.py diff --git a/python3/Makefile b/python3/Makefile index 02d819443ed..e85e199f705 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -10,6 +10,7 @@ install: mkdir -p $(DESTDIR)$(SITE3_DIR) mkdir -p $(DESTDIR)$(LIBEXECDIR) - $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) + $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin + $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) diff --git a/scripts/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py similarity index 100% rename from 
scripts/nbd_client_manager.py rename to python3/libexec/nbd_client_manager.py diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/unittest/test_nbd_client_manager.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/scripts/Makefile b/scripts/Makefile index d5984d927a7..51dc1b092f6 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -30,7 +30,6 @@ install: $(IPROG) xn_diagnostics $(DESTDIR)$(LIBEXECDIR) $(IPROG) thread_diagnostics $(DESTDIR)$(LIBEXECDIR) $(IPROG) list_plugins $(DESTDIR)$(LIBEXECDIR) - $(IPROG) nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(ETCXENDIR)/bugtool/xapi mkdir -p $(DESTDIR)$(ETCXENDIR)/bugtool/xenopsd mkdir -p $(DESTDIR)$(ETCXENDIR)/bugtool/observer From a6ede57f308ba5b2b9416ef8cba0f3362b4b0511 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 4 Mar 2024 05:59:15 +0000 Subject: [PATCH 017/341] CP-47334: Formatting by "black" tool Signed-off-by: Stephen Cheng --- python3/libexec/nbd_client_manager.py | 114 +++++++++++++++----------- 1 file changed, 65 insertions(+), 49 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index bebe97a2587..a832729b847 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 """ Provides functions and a CLI for safely connecting to and disconnecting from @@ -20,7 +20,7 @@ LOGGER = logging.getLogger("nbd_client_manager") LOGGER.setLevel(logging.DEBUG) -LOCK_FILE = '/var/run/nonpersistent/nbd_client_manager' +LOCK_FILE = "/var/run/nonpersistent/nbd_client_manager" # Don't wait more than 10 minutes for the NBD device MAX_DEVICE_WAIT_MINUTES = 10 @@ -31,14 +31,17 @@ class NbdDeviceNotFound(Exception): The NBD device file does not exist. Raised when there are no free NBD devices. 
""" + def __init__(self, nbd_device): super(NbdDeviceNotFound, self).__init__( - "NBD device '{}' does not exist".format(nbd_device)) + "NBD device '{}' does not exist".format(nbd_device) + ) self.nbd_device = nbd_device class FileLock(object): """Container for data relating to a file lock""" + def __init__(self, path): self._path = path self._lock_file = None @@ -46,7 +49,7 @@ def __init__(self, path): def _lock(self): """Acquire the lock""" flags = fcntl.LOCK_EX - self._lock_file = open(self._path, 'w+') + self._lock_file = open(self._path, "w+") fcntl.flock(self._lock_file, flags) def _unlock(self): @@ -73,25 +76,19 @@ def _call(cmd_args, error=True): """ LOGGER.debug("Running cmd %s", cmd_args) proc = subprocess.Popen( - cmd_args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True + cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True ) stdout, stderr = proc.communicate() if error and proc.returncode != 0: LOGGER.error( - "%s exitted with code %d: %s", - ' '.join(cmd_args), - proc.returncode, - stderr) + "%s exitted with code %d: %s", " ".join(cmd_args), proc.returncode, stderr + ) raise subprocess.CalledProcessError( - returncode=proc.returncode, - cmd=cmd_args, - output=stderr) + returncode=proc.returncode, cmd=cmd_args, output=stderr + ) return proc.returncode @@ -105,7 +102,7 @@ def _is_nbd_device_connected(nbd_device): # 1 for a non-existent file. 
if not os.path.exists(nbd_device): raise NbdDeviceNotFound(nbd_device) - cmd = ['nbd-client', '-check', nbd_device] + cmd = ["nbd-client", "-check", nbd_device] returncode = _call(cmd, error=False) if returncode == 0: return True @@ -133,31 +130,37 @@ def _wait_for_nbd_device(nbd_device, connected): if datetime.now() > deadline: raise Exception( "Timed out waiting for connection state of device %s to be %s" - % (nbd_device, connected)) + % (nbd_device, connected) + ) LOGGER.debug( - 'Connection status of NBD device %s not yet %s, waiting', + "Connection status of NBD device %s not yet %s, waiting", nbd_device, - connected) + connected, + ) time.sleep(0.1) + PERSISTENT_INFO_DIR = "/var/run/nonpersistent/nbd" + def _get_persistent_connect_info_filename(device): """ Return the full path for the persistent file containing the connection details. This is based on the device name, so /dev/nbd0 -> /var/run/nonpersistent/nbd/0 """ - number = re.search('/dev/nbd([0-9]+)', device).group(1) - return PERSISTENT_INFO_DIR + '/' + number + number = re.search("/dev/nbd([0-9]+)", device).group(1) + return PERSISTENT_INFO_DIR + "/" + number + def _persist_connect_info(device, path, exportname): if not os.path.exists(PERSISTENT_INFO_DIR): os.makedirs(PERSISTENT_INFO_DIR) filename = _get_persistent_connect_info_filename(device) - with open(filename, 'w') as info_file: - info_file.write(json.dumps({'path':path, 'exportname':exportname})) + with open(filename, "w") as info_file: + info_file.write(json.dumps({"path": path, "exportname": exportname})) + def _remove_persistent_connect_info(device): try: @@ -165,22 +168,34 @@ def _remove_persistent_connect_info(device): except OSError: pass + def connect_nbd(path, exportname): """Connects to a free NBD device using nbd-client and returns its path""" # We should not ask for too many nbds, as we might not have enough memory - _call(['modprobe', 'nbd', 'nbds_max=24']) + _call(["modprobe", "nbd", "nbds_max=24"]) retries = 0 while True: try: 
with FILE_LOCK: nbd_device = _find_unused_nbd_device() - cmd = ['nbd-client', '-unix', path, nbd_device, - '-timeout', '60', '-name', exportname] + cmd = [ + "nbd-client", + "-unix", + path, + nbd_device, + "-timeout", + "60", + "-name", + exportname, + ] _call(cmd) _wait_for_nbd_device(nbd_device=nbd_device, connected=True) _persist_connect_info(nbd_device, path, exportname) - nbd = (nbd_device[len('/dev/'):] - if nbd_device.startswith('/dev/') else nbd_device) + nbd = ( + nbd_device[len("/dev/") :] + if nbd_device.startswith("/dev/") + else nbd_device + ) with open("/sys/block/" + nbd + "/queue/scheduler", "w") as fd: fd.write("none") # Set the NBD queue size to the same as the qcow2 cluster size @@ -191,7 +206,7 @@ def connect_nbd(path, exportname): return nbd_device except NbdDeviceNotFound as exn: - LOGGER.warn('Failed to find free nbd device: %s', exn) + LOGGER.warn("Failed to find free nbd device: %s", exn) retries = retries + 1 if retries == 1: # We sleep for a shorter period first, in case an nbd device @@ -212,7 +227,7 @@ def disconnect_nbd_device(nbd_device): try: if _is_nbd_device_connected(nbd_device=nbd_device): _remove_persistent_connect_info(nbd_device) - cmd = ['nbd-client', '-disconnect', nbd_device] + cmd = ["nbd-client", "-disconnect", nbd_device] _call(cmd) _wait_for_nbd_device(nbd_device=nbd_device, connected=False) except NbdDeviceNotFound: @@ -220,10 +235,9 @@ def disconnect_nbd_device(nbd_device): pass - def _connect_cli(args): device = connect_nbd(path=args.path, exportname=args.exportname) - print device + print(device) def _disconnect_cli(args): @@ -234,39 +248,41 @@ def _main(): # Configure the root logger to log into syslog # (Specifically, into /var/log/user.log) syslog_handler = logging.handlers.SysLogHandler( - address='/dev/log', - facility=logging.handlers.SysLogHandler.LOG_USER) + address="/dev/log", facility=logging.handlers.SysLogHandler.LOG_USER + ) # Ensure the program name is included in the log messages: - formatter = 
logging.Formatter('%(name)s: [%(levelname)s] %(message)s') + formatter = logging.Formatter("%(name)s: [%(levelname)s] %(message)s") syslog_handler.setFormatter(formatter) logging.getLogger().addHandler(syslog_handler) try: parser = argparse.ArgumentParser( - description="Connect to and disconnect from an NBD device") + description="Connect to and disconnect from an NBD device" + ) - subparsers = parser.add_subparsers(dest='command_name') + subparsers = parser.add_subparsers(dest="command_name") parser_connect = subparsers.add_parser( - 'connect', - help='Connect to a free NBD device and return its path') + "connect", help="Connect to a free NBD device and return its path" + ) parser_connect.add_argument( - '--path', + "--path", required=True, - help="The path of the Unix domain socket of the NBD server") + help="The path of the Unix domain socket of the NBD server", + ) parser_connect.add_argument( - '--exportname', + "--exportname", required=True, - help="The export name of the device to connect to") + help="The export name of the device to connect to", + ) parser_connect.set_defaults(func=_connect_cli) parser_disconnect = subparsers.add_parser( - 'disconnect', - help='Disconnect from the given NBD device') + "disconnect", help="Disconnect from the given NBD device" + ) parser_disconnect.add_argument( - '--device', - required=True, - help="The path of the NBD device to disconnect") + "--device", required=True, help="The path of the NBD device to disconnect" + ) parser_disconnect.set_defaults(func=_disconnect_cli) args = parser.parse_args() @@ -276,5 +292,5 @@ def _main(): raise -if __name__ == '__main__': +if __name__ == "__main__": _main() From 13a51211cca0040b027bd3ea90ef9a7ee88faf81 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 4 Mar 2024 08:58:30 +0000 Subject: [PATCH 018/341] CP-47334: Migrate nbd_client_manager.py to python3 Signed-off-by: Stephen Cheng --- python3/libexec/nbd_client_manager.py | 18 +++++++++--------- 1 file changed, 9 
insertions(+), 9 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index a832729b847..aece3f55fed 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -6,17 +6,16 @@ """ import argparse +import fcntl +import json import logging import logging.handlers import os +import re import subprocess import time -import fcntl -import json -import re from datetime import datetime, timedelta - LOGGER = logging.getLogger("nbd_client_manager") LOGGER.setLevel(logging.DEBUG) @@ -76,7 +75,8 @@ def _call(cmd_args, error=True): """ LOGGER.debug("Running cmd %s", cmd_args) proc = subprocess.Popen( - cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True + cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, + universal_newlines=True ) stdout, stderr = proc.communicate() @@ -158,7 +158,7 @@ def _persist_connect_info(device, path, exportname): if not os.path.exists(PERSISTENT_INFO_DIR): os.makedirs(PERSISTENT_INFO_DIR) filename = _get_persistent_connect_info_filename(device) - with open(filename, "w") as info_file: + with open(filename, "w", encoding="utf-8") as info_file: info_file.write(json.dumps({"path": path, "exportname": exportname})) @@ -196,12 +196,12 @@ def connect_nbd(path, exportname): if nbd_device.startswith("/dev/") else nbd_device ) - with open("/sys/block/" + nbd + "/queue/scheduler", "w") as fd: + with open("/sys/block/" + nbd + "/queue/scheduler", "w", encoding="utf-8") as fd: fd.write("none") # Set the NBD queue size to the same as the qcow2 cluster size - with open("/sys/block/" + nbd + "/queue/max_sectors_kb", "w") as fd: + with open("/sys/block/" + nbd + "/queue/max_sectors_kb", "w", encoding="utf-8") as fd: fd.write("512") - with open("/sys/block/" + nbd + "/queue/nr_requests", "w") as fd: + with open("/sys/block/" + nbd + "/queue/nr_requests", "w", encoding="utf-8") as fd: fd.write("8") return nbd_device From 
031ee7c446579e32fd6473a2b6ed5bc4d9ea407b Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 6 Mar 2024 05:31:01 +0000 Subject: [PATCH 019/341] CP-47334: Add unit tests for nbd_client_manager.py Signed-off-by: Stephen Cheng --- python3/libexec/nbd_client_manager.py | 8 +- python3/unittest/test_nbd_client_manager.py | 231 ++++++++++++++++++++ 2 files changed, 235 insertions(+), 4 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index aece3f55fed..aefe1002ee3 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -37,8 +37,7 @@ def __init__(self, nbd_device): ) self.nbd_device = nbd_device - -class FileLock(object): +class FileLock: # pragma: no cover """Container for data relating to a file lock""" def __init__(self, path): @@ -243,8 +242,9 @@ def _connect_cli(args): def _disconnect_cli(args): disconnect_nbd_device(nbd_device=args.device) - -def _main(): +# The main function is covered by manual test and XenRT test +# Exclude it from unit test coverage +def _main(): # pragma: no cover # Configure the root logger to log into syslog # (Specifically, into /var/log/user.log) syslog_handler = logging.handlers.SysLogHandler( diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/unittest/test_nbd_client_manager.py index e69de29bb2d..461755c8ba7 100644 --- a/python3/unittest/test_nbd_client_manager.py +++ b/python3/unittest/test_nbd_client_manager.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python3 +""" +This module provides unittest for nbd_client_manager.py +""" + +import sys +import unittest +import subprocess +from mock import MagicMock, patch, mock_open, call +from import_file import get_module + +nbd_client_manager = get_module("nbd_client_manager", "../libexec/nbd_client_manager.py") + +# mock modules to avoid dependencies +sys.modules["XenAPI"] = MagicMock() + +@patch('subprocess.Popen') +class TestCallFunction(unittest.TestCase): + + def 
test_call_success(self, mock_popen): + mock_process = mock_popen.return_value + mock_process.communicate.return_value = ("ls -l output", "") + mock_process.returncode = 0 + + returncode = nbd_client_manager._call(["ls", "-l"]) + + self.assertEqual(returncode, 0) + + def test_call_failure(self, mock_popen): + mock_process = mock_popen.return_value + mock_process.communicate.return_value = ("", "err") + mock_process.returncode = 1 + + with self.assertRaises(subprocess.CalledProcessError) as cm: + nbd_client_manager._call(["invalid_cmd"]) + + self.assertEqual(cm.exception.returncode, 1) + +@patch('nbd_client_manager.os.path.exists') +@patch('nbd_client_manager._call') +class TestIsNbdDeviceConnected(unittest.TestCase): + + def test_nbd_device_connected(self, mock_call, mock_exists): + mock_exists.return_value = True + mock_call.return_value = 0 + + result = nbd_client_manager._is_nbd_device_connected('/dev/nbd0') + + self.assertTrue(result) + mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd0"], error=False) + + def test_nbd_device_not_connected(self, mock_call, mock_exists): + mock_exists.return_value = True + mock_call.return_value = 1 + + result = nbd_client_manager._is_nbd_device_connected('/dev/nbd1') + + self.assertFalse(result) + mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd1"], error=False) + + def test_nbd_device_not_found(self, mock_call, mock_exists): + mock_exists.return_value = False + + # Testing the function with a non-existent device + with self.assertRaises(nbd_client_manager.NbdDeviceNotFound): + nbd_client_manager._is_nbd_device_connected('/dev/nbd2') + +@patch('nbd_client_manager._is_nbd_device_connected') +class TestFindUnusedNbdDevice(unittest.TestCase): + def test_find_unused_nbd_device(self, mock_is_nbd_device_connected): + # Mocking the function to return True for /dev/nbd0 and False for /dev/nbd1 + mock_is_nbd_device_connected.side_effect = [True, False] + + # Testing the function + unused_device = 
nbd_client_manager._find_unused_nbd_device() + + # Assertion + self.assertEqual(unused_device, "/dev/nbd1") + + def test_no_unused_nbd_device(self, mock_is_nbd_device_connected): + # Mocking the function to always raise NbdDeviceNotFound + mock_is_nbd_device_connected.side_effect = nbd_client_manager.NbdDeviceNotFound('/dev/nbd1') + + # Testing the function when no unused devices are found + with self.assertRaises(nbd_client_manager.NbdDeviceNotFound): + nbd_client_manager._find_unused_nbd_device() + +@patch('nbd_client_manager._is_nbd_device_connected') +class TestWaitForNbdDevice(unittest.TestCase): + def test_wait_for_nbd_device_connected(self, mock_is_nbd_device_connected): + mock_is_nbd_device_connected.return_value = True + nbd_client_manager._wait_for_nbd_device('/dev/nbd0', connected=True) + mock_is_nbd_device_connected.assert_called_once_with(nbd_device='/dev/nbd0') + + def test_wait_for_nbd_device_disconnected(self, mock_is_nbd_device_connected): + mock_is_nbd_device_connected.return_value = False + nbd_client_manager._wait_for_nbd_device('/dev/nbd1', connected=False) + mock_is_nbd_device_connected.assert_called_once_with(nbd_device='/dev/nbd1') + +class TestGetPersistentConnectInfoFilename(unittest.TestCase): + def test_get_persistent_connect_info_filename(self): + # Test for device /dev/nbd0 + device = "/dev/nbd0" + expected_filename = f"{nbd_client_manager.PERSISTENT_INFO_DIR}/0" + self.assertEqual(nbd_client_manager._get_persistent_connect_info_filename(device), expected_filename) + +@patch('nbd_client_manager.os.makedirs') +@patch('nbd_client_manager.os.path.exists') +class TestPersistConnectInfo(unittest.TestCase): + + def test_persist_connect_info(self, mock_exists, mock_makedirs): + mock_exists.return_value = False + + # Test data + device = "/dev/nbd0" + path = "/some/path" + exportname = "example_export" + + # Setting up mock for file write + mock_file = mock_open() + with patch('builtins.open', mock_file): + # Run the function + 
nbd_client_manager._persist_connect_info(device, path, exportname) + + # Assertions + mock_makedirs.assert_called_once_with(nbd_client_manager.PERSISTENT_INFO_DIR) + mock_file.assert_called_once_with('/var/run/nonpersistent/nbd/0', 'w', encoding='utf-8') + mock_file().write.assert_called_once_with('{"path": "/some/path", "exportname": "example_export"}') + + def test_persist_connect_info_directory_exists(self, mock_exists, mock_makedirs): + mock_exists.return_value = True + + # Test data + device = "/dev/nbd0" + path = "/some/path" + exportname = "example_export" + + # Setting up mock for file write + mock_file = mock_open() + with patch('builtins.open', mock_file): + # Run the function + nbd_client_manager._persist_connect_info(device, path, exportname) + + # Assertions + mock_makedirs.assert_not_called() + mock_file.assert_called_once_with('/var/run/nonpersistent/nbd/0', 'w', encoding='utf-8') + mock_file().write.assert_called_once_with('{"path": "/some/path", "exportname": "example_export"}') + +class TestRemovePersistentConnectInfo(unittest.TestCase): + @patch('nbd_client_manager.os.remove') + def test_remove_persistent_connect_info(self, mock_os_remove): + nbd_client_manager._remove_persistent_connect_info('/dev/nbd0') + mock_os_remove.assert_called_once_with('/var/run/nonpersistent/nbd/0') + +class TestConnectNbd(unittest.TestCase): + @patch('nbd_client_manager._call') + @patch('nbd_client_manager._find_unused_nbd_device') + @patch('nbd_client_manager._wait_for_nbd_device') + @patch('nbd_client_manager._persist_connect_info') + @patch('nbd_client_manager.open') + @patch('nbd_client_manager.FILE_LOCK', MagicMock()) # Mocking FILE_LOCK + def test_connect_nbd(self, mock_open, mock_persist_info, mock_wait_for_nbd, mock_find_unused, mock_call): + # Mocking necessary functions and file operations + mock_find_unused.return_value = "/dev/nbd0" + mock_call.return_value = 0 + mock_file_scheduler = MagicMock() + mock_file_max_sectors_kb = MagicMock() + 
mock_file_nr_requests = MagicMock() + mock_open.side_effect = [mock_file_scheduler, mock_file_max_sectors_kb, mock_file_nr_requests] + + # Testing the function + result = nbd_client_manager.connect_nbd("/path/of/socket/file", "export_name") + + # Assertions + self.assertEqual(result, "/dev/nbd0") + mock_find_unused.assert_called_once() + mock_call.assert_called() + mock_wait_for_nbd.assert_called_once_with(nbd_device="/dev/nbd0", connected=True) + mock_persist_info.assert_called_once_with("/dev/nbd0", "/path/of/socket/file", "export_name") + # Checking open calls + mock_open.assert_has_calls([ + call("/sys/block/nbd0/queue/scheduler", "w", encoding="utf-8"), + call("/sys/block/nbd0/queue/max_sectors_kb", "w", encoding="utf-8"), + call("/sys/block/nbd0/queue/nr_requests", "w", encoding="utf-8") + ], any_order=True) + +@patch('nbd_client_manager._is_nbd_device_connected') +@patch('nbd_client_manager._remove_persistent_connect_info') +@patch('nbd_client_manager._call') +@patch('nbd_client_manager._wait_for_nbd_device') +class TestDisconnectNbdDevice(unittest.TestCase): + + def test_disconnect_nbd_device_connected(self, mock_wait_for_nbd, mock_call, mock_remove_persistent, mock_is_connected): + # Mocking _is_nbd_device_connected to return True + mock_is_connected.return_value = True + + # Testing the function when device is connected + nbd_client_manager.disconnect_nbd_device("/dev/nbd0") + + # Assertions + mock_is_connected.assert_called_once_with(nbd_device="/dev/nbd0") + mock_remove_persistent.assert_called_once_with("/dev/nbd0") + mock_call.assert_called_once_with(["nbd-client", "-disconnect", "/dev/nbd0"]) + mock_wait_for_nbd.assert_called_once_with(nbd_device="/dev/nbd0", connected=False) + + def test_disconnect_nbd_device_disconnected(self, mock_wait_for_nbd, mock_call, mock_remove_persistent, mock_is_connected): + # Mocking _is_nbd_device_connected to return False + mock_is_connected.return_value = False + + # Testing the function when device is already 
disconnected + nbd_client_manager.disconnect_nbd_device("/dev/nbd0") + + # Assertions + mock_is_connected.assert_called_once_with(nbd_device="/dev/nbd0") + mock_remove_persistent.assert_not_called() + mock_call.assert_not_called() + mock_wait_for_nbd.assert_not_called() + + def test_disconnect_nbd_device_not_found(self, mock_wait_for_nbd, mock_call, mock_remove_persistent, mock_is_connected): + # Mocking _is_nbd_device_connected to raise NbdDeviceNotFound + mock_is_connected.side_effect = nbd_client_manager.NbdDeviceNotFound('/dev/nbd0') + + # Testing the function when device is not found + nbd_client_manager.disconnect_nbd_device("/dev/nbd0") + + # Assertions + mock_is_connected.assert_called_once_with(nbd_device="/dev/nbd0") + mock_remove_persistent.assert_not_called() + mock_call.assert_not_called() + mock_wait_for_nbd.assert_not_called() + From 8f8708801886f525f3a2eac404f5ddeda2347d6d Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 7 Mar 2024 00:57:54 +0000 Subject: [PATCH 020/341] CP-47334: Fix pylint and pytype issues Signed-off-by: Stephen Cheng --- python3/libexec/nbd_client_manager.py | 51 +++++++++++++++------ python3/unittest/test_nbd_client_manager.py | 51 ++++++++++++++------- 2 files changed, 71 insertions(+), 31 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index aefe1002ee3..5179ccc21cc 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -25,6 +25,18 @@ MAX_DEVICE_WAIT_MINUTES = 10 +class NotGetNbdNumber(Exception): + """ + The NBD device should be in this format: nbd{0-100} + If we cannot match this pattern, raise this exception + """ + +class NbdConnStateTimeout(Exception): + """ + If we cannot get the connection status of a nbd device, + raise this exception. + """ + class NbdDeviceNotFound(Exception): """ The NBD device file does not exist. 
Raised when there are no free NBD @@ -32,8 +44,8 @@ class NbdDeviceNotFound(Exception): """ def __init__(self, nbd_device): - super(NbdDeviceNotFound, self).__init__( - "NBD device '{}' does not exist".format(nbd_device) + super().__init__( + f"NBD device '{nbd_device}' does not exist" ) self.nbd_device = nbd_device @@ -47,7 +59,8 @@ def __init__(self, path): def _lock(self): """Acquire the lock""" flags = fcntl.LOCK_EX - self._lock_file = open(self._path, "w+") + # pylint: disable=consider-using-with + self._lock_file = open(self._path, "w+", encoding="utf8") fcntl.flock(self._lock_file, flags) def _unlock(self): @@ -73,12 +86,13 @@ def _call(cmd_args, error=True): If [error] and exit code != 0, log and throws a CalledProcessError. """ LOGGER.debug("Running cmd %s", cmd_args) + # pylint: disable=consider-using-with proc = subprocess.Popen( cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, universal_newlines=True ) - stdout, stderr = proc.communicate() + _, stderr = proc.communicate() if error and proc.returncode != 0: LOGGER.error( @@ -117,19 +131,22 @@ def _find_unused_nbd_device(): Raises NbdDeviceNotFound if no devices are available. 
""" for device_no in range(0, 1000): - nbd_device = "/dev/nbd{}".format(device_no) + nbd_device = f"/dev/nbd{device_no}" if not _is_nbd_device_connected(nbd_device=nbd_device): return nbd_device - + # Actually `_is_nbd_device_connected` will raise an exception + # if no unused device + # Add this return for pylint check + return None def _wait_for_nbd_device(nbd_device, connected): deadline = datetime.now() + timedelta(minutes=MAX_DEVICE_WAIT_MINUTES) while _is_nbd_device_connected(nbd_device=nbd_device) != connected: if datetime.now() > deadline: - raise Exception( - "Timed out waiting for connection state of device %s to be %s" - % (nbd_device, connected) + raise NbdConnStateTimeout( + f"Timed out waiting for connection state of " + f"device {nbd_device} to be {connected}" ) LOGGER.debug( @@ -149,7 +166,10 @@ def _get_persistent_connect_info_filename(device): the connection details. This is based on the device name, so /dev/nbd0 -> /var/run/nonpersistent/nbd/0 """ - number = re.search("/dev/nbd([0-9]+)", device).group(1) + matched = re.search("/dev/nbd([0-9]+)", device) + if not matched: + raise NotGetNbdNumber(f"Can not get the nbd number for device: {device}") + number = matched.group(1) return PERSISTENT_INFO_DIR + "/" + number @@ -195,17 +215,20 @@ def connect_nbd(path, exportname): if nbd_device.startswith("/dev/") else nbd_device ) - with open("/sys/block/" + nbd + "/queue/scheduler", "w", encoding="utf-8") as fd: + with open("/sys/block/" + nbd + "/queue/scheduler", + "w", encoding="utf-8") as fd: fd.write("none") # Set the NBD queue size to the same as the qcow2 cluster size - with open("/sys/block/" + nbd + "/queue/max_sectors_kb", "w", encoding="utf-8") as fd: + with open("/sys/block/" + nbd + "/queue/max_sectors_kb", + "w", encoding="utf-8") as fd: fd.write("512") - with open("/sys/block/" + nbd + "/queue/nr_requests", "w", encoding="utf-8") as fd: + with open("/sys/block/" + nbd + "/queue/nr_requests", + "w", encoding="utf-8") as fd: fd.write("8") 
return nbd_device except NbdDeviceNotFound as exn: - LOGGER.warn("Failed to find free nbd device: %s", exn) + LOGGER.warning("Failed to find free nbd device: %s", exn) retries = retries + 1 if retries == 1: # We sleep for a shorter period first, in case an nbd device diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/unittest/test_nbd_client_manager.py index 461755c8ba7..31f6d8083ca 100644 --- a/python3/unittest/test_nbd_client_manager.py +++ b/python3/unittest/test_nbd_client_manager.py @@ -14,6 +14,9 @@ # mock modules to avoid dependencies sys.modules["XenAPI"] = MagicMock() +# pylint: disable=protected-access +# pylint: disable=missing-function-docstring +# pylint: disable=missing-class-docstring @patch('subprocess.Popen') class TestCallFunction(unittest.TestCase): @@ -35,11 +38,11 @@ def test_call_failure(self, mock_popen): nbd_client_manager._call(["invalid_cmd"]) self.assertEqual(cm.exception.returncode, 1) - + @patch('nbd_client_manager.os.path.exists') -@patch('nbd_client_manager._call') class TestIsNbdDeviceConnected(unittest.TestCase): + @patch('nbd_client_manager._call') def test_nbd_device_connected(self, mock_call, mock_exists): mock_exists.return_value = True mock_call.return_value = 0 @@ -49,6 +52,7 @@ def test_nbd_device_connected(self, mock_call, mock_exists): self.assertTrue(result) mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd0"], error=False) + @patch('nbd_client_manager._call') def test_nbd_device_not_connected(self, mock_call, mock_exists): mock_exists.return_value = True mock_call.return_value = 1 @@ -58,13 +62,13 @@ def test_nbd_device_not_connected(self, mock_call, mock_exists): self.assertFalse(result) mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd1"], error=False) - def test_nbd_device_not_found(self, mock_call, mock_exists): + def test_nbd_device_not_found(self, mock_exists): mock_exists.return_value = False # Testing the function with a non-existent device with 
self.assertRaises(nbd_client_manager.NbdDeviceNotFound): nbd_client_manager._is_nbd_device_connected('/dev/nbd2') - + @patch('nbd_client_manager._is_nbd_device_connected') class TestFindUnusedNbdDevice(unittest.TestCase): def test_find_unused_nbd_device(self, mock_is_nbd_device_connected): @@ -102,7 +106,8 @@ def test_get_persistent_connect_info_filename(self): # Test for device /dev/nbd0 device = "/dev/nbd0" expected_filename = f"{nbd_client_manager.PERSISTENT_INFO_DIR}/0" - self.assertEqual(nbd_client_manager._get_persistent_connect_info_filename(device), expected_filename) + self.assertEqual(nbd_client_manager._get_persistent_connect_info_filename(device), + expected_filename) @patch('nbd_client_manager.os.makedirs') @patch('nbd_client_manager.os.path.exists') @@ -110,7 +115,7 @@ class TestPersistConnectInfo(unittest.TestCase): def test_persist_connect_info(self, mock_exists, mock_makedirs): mock_exists.return_value = False - + # Test data device = "/dev/nbd0" path = "/some/path" @@ -125,11 +130,13 @@ def test_persist_connect_info(self, mock_exists, mock_makedirs): # Assertions mock_makedirs.assert_called_once_with(nbd_client_manager.PERSISTENT_INFO_DIR) mock_file.assert_called_once_with('/var/run/nonpersistent/nbd/0', 'w', encoding='utf-8') - mock_file().write.assert_called_once_with('{"path": "/some/path", "exportname": "example_export"}') + mock_file().write.assert_called_once_with( + '{"path": "/some/path", "exportname": "example_export"}' + ) def test_persist_connect_info_directory_exists(self, mock_exists, mock_makedirs): mock_exists.return_value = True - + # Test data device = "/dev/nbd0" path = "/some/path" @@ -144,7 +151,9 @@ def test_persist_connect_info_directory_exists(self, mock_exists, mock_makedirs) # Assertions mock_makedirs.assert_not_called() mock_file.assert_called_once_with('/var/run/nonpersistent/nbd/0', 'w', encoding='utf-8') - mock_file().write.assert_called_once_with('{"path": "/some/path", "exportname": "example_export"}') + 
mock_file().write.assert_called_once_with( + '{"path": "/some/path", "exportname": "example_export"}' + ) class TestRemovePersistentConnectInfo(unittest.TestCase): @patch('nbd_client_manager.os.remove') @@ -159,14 +168,18 @@ class TestConnectNbd(unittest.TestCase): @patch('nbd_client_manager._persist_connect_info') @patch('nbd_client_manager.open') @patch('nbd_client_manager.FILE_LOCK', MagicMock()) # Mocking FILE_LOCK - def test_connect_nbd(self, mock_open, mock_persist_info, mock_wait_for_nbd, mock_find_unused, mock_call): + # pylint: disable=too-many-arguments + def test_connect_nbd(self, mock_openfile, mock_persist_info, + mock_wait_for_nbd, mock_find_unused, mock_call): # Mocking necessary functions and file operations mock_find_unused.return_value = "/dev/nbd0" mock_call.return_value = 0 mock_file_scheduler = MagicMock() mock_file_max_sectors_kb = MagicMock() mock_file_nr_requests = MagicMock() - mock_open.side_effect = [mock_file_scheduler, mock_file_max_sectors_kb, mock_file_nr_requests] + mock_openfile.side_effect = [mock_file_scheduler, + mock_file_max_sectors_kb, + mock_file_nr_requests] # Testing the function result = nbd_client_manager.connect_nbd("/path/of/socket/file", "export_name") @@ -176,9 +189,11 @@ def test_connect_nbd(self, mock_open, mock_persist_info, mock_wait_for_nbd, mock mock_find_unused.assert_called_once() mock_call.assert_called() mock_wait_for_nbd.assert_called_once_with(nbd_device="/dev/nbd0", connected=True) - mock_persist_info.assert_called_once_with("/dev/nbd0", "/path/of/socket/file", "export_name") + mock_persist_info.assert_called_once_with( + "/dev/nbd0", "/path/of/socket/file", "export_name" + ) # Checking open calls - mock_open.assert_has_calls([ + mock_openfile.assert_has_calls([ call("/sys/block/nbd0/queue/scheduler", "w", encoding="utf-8"), call("/sys/block/nbd0/queue/max_sectors_kb", "w", encoding="utf-8"), call("/sys/block/nbd0/queue/nr_requests", "w", encoding="utf-8") @@ -190,7 +205,8 @@ def test_connect_nbd(self, 
mock_open, mock_persist_info, mock_wait_for_nbd, mock @patch('nbd_client_manager._wait_for_nbd_device') class TestDisconnectNbdDevice(unittest.TestCase): - def test_disconnect_nbd_device_connected(self, mock_wait_for_nbd, mock_call, mock_remove_persistent, mock_is_connected): + def test_disconnect_nbd_device_connected(self, mock_wait_for_nbd, + mock_call, mock_remove_persistent, mock_is_connected): # Mocking _is_nbd_device_connected to return True mock_is_connected.return_value = True @@ -203,7 +219,8 @@ def test_disconnect_nbd_device_connected(self, mock_wait_for_nbd, mock_call, moc mock_call.assert_called_once_with(["nbd-client", "-disconnect", "/dev/nbd0"]) mock_wait_for_nbd.assert_called_once_with(nbd_device="/dev/nbd0", connected=False) - def test_disconnect_nbd_device_disconnected(self, mock_wait_for_nbd, mock_call, mock_remove_persistent, mock_is_connected): + def test_disconnect_nbd_device_disconnected(self, mock_wait_for_nbd, mock_call, + mock_remove_persistent, mock_is_connected): # Mocking _is_nbd_device_connected to return False mock_is_connected.return_value = False @@ -216,7 +233,8 @@ def test_disconnect_nbd_device_disconnected(self, mock_wait_for_nbd, mock_call, mock_call.assert_not_called() mock_wait_for_nbd.assert_not_called() - def test_disconnect_nbd_device_not_found(self, mock_wait_for_nbd, mock_call, mock_remove_persistent, mock_is_connected): + def test_disconnect_nbd_device_not_found(self, mock_wait_for_nbd, mock_call, + mock_remove_persistent, mock_is_connected): # Mocking _is_nbd_device_connected to raise NbdDeviceNotFound mock_is_connected.side_effect = nbd_client_manager.NbdDeviceNotFound('/dev/nbd0') @@ -228,4 +246,3 @@ def test_disconnect_nbd_device_not_found(self, mock_wait_for_nbd, mock_call, moc mock_remove_persistent.assert_not_called() mock_call.assert_not_called() mock_wait_for_nbd.assert_not_called() - From e7e61b0cae254b14827463cc3353af9c857d9b19 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 7 Mar 2024 02:37:18 +0000 
Subject: [PATCH 021/341] CP-47334: Adjust pytype config for new python3 directory Signed-off-by: Stephen Cheng --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index afc1ff32067..8b00e6402e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,6 @@ expected_to_fail = [ "scripts/hatests", "scripts/backup-sr-metadata.py", "scripts/restore-sr-metadata.py", - "scripts/nbd_client_manager.py", # SSLSocket.send() only accepts bytes, not unicode string as argument: "scripts/examples/python/exportimport.py", # Other fixes needed: @@ -123,7 +122,8 @@ inputs = [ # Python 3 "python3/bin/hfx_filename", - "python3/*.py", + "python3/bin/*.py", + "python3/libexec/*.py", # To be added later, # when converted to Python3-compatible syntax: From 4cf9f3c8ab0e203c593ec53c5a51f849fcbcdacb Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 7 Mar 2024 06:01:34 +0000 Subject: [PATCH 022/341] CP-47334: Not using f-string During building, our build system uses both python2 and python3 to compile against .py files and f-string won't be accepted by py2 compiling. Signed-off-by: Stephen Cheng --- python3/libexec/nbd_client_manager.py | 10 +++++----- python3/unittest/test_nbd_client_manager.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index 5179ccc21cc..e251198c762 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -45,7 +45,7 @@ class NbdDeviceNotFound(Exception): def __init__(self, nbd_device): super().__init__( - f"NBD device '{nbd_device}' does not exist" + "NBD device '{}' does not exist".format(nbd_device) ) self.nbd_device = nbd_device @@ -131,7 +131,7 @@ def _find_unused_nbd_device(): Raises NbdDeviceNotFound if no devices are available. 
""" for device_no in range(0, 1000): - nbd_device = f"/dev/nbd{device_no}" + nbd_device = "/dev/nbd{}".format(device_no) if not _is_nbd_device_connected(nbd_device=nbd_device): return nbd_device # Actually `_is_nbd_device_connected` will raise an exception @@ -145,8 +145,8 @@ def _wait_for_nbd_device(nbd_device, connected): while _is_nbd_device_connected(nbd_device=nbd_device) != connected: if datetime.now() > deadline: raise NbdConnStateTimeout( - f"Timed out waiting for connection state of " - f"device {nbd_device} to be {connected}" + "Timed out waiting for connection state of device %s to be %s" + % (nbd_device, connected) ) LOGGER.debug( @@ -168,7 +168,7 @@ def _get_persistent_connect_info_filename(device): """ matched = re.search("/dev/nbd([0-9]+)", device) if not matched: - raise NotGetNbdNumber(f"Can not get the nbd number for device: {device}") + raise NotGetNbdNumber("Can not get the nbd number") number = matched.group(1) return PERSISTENT_INFO_DIR + "/" + number diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/unittest/test_nbd_client_manager.py index 31f6d8083ca..0c06a4c258e 100644 --- a/python3/unittest/test_nbd_client_manager.py +++ b/python3/unittest/test_nbd_client_manager.py @@ -105,7 +105,7 @@ class TestGetPersistentConnectInfoFilename(unittest.TestCase): def test_get_persistent_connect_info_filename(self): # Test for device /dev/nbd0 device = "/dev/nbd0" - expected_filename = f"{nbd_client_manager.PERSISTENT_INFO_DIR}/0" + expected_filename = "/var/run/nonpersistent/nbd/0" self.assertEqual(nbd_client_manager._get_persistent_connect_info_filename(device), expected_filename) From 7719ef80d51df3b02ef748c38f8bea28c9214884 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 8 Mar 2024 01:44:31 +0000 Subject: [PATCH 023/341] CP-47334: Disable some pylint checks Signed-off-by: Stephen Cheng --- pyproject.toml | 1 + python3/unittest/test_nbd_client_manager.py | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git 
a/pyproject.toml b/pyproject.toml index 8b00e6402e6..e7b587a89e2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ disable = [ "import-error", "unnecessary-pass", "unspecified-encoding", + "protected-access", ] [tool.mypy] diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/unittest/test_nbd_client_manager.py index 0c06a4c258e..8ead3cfa580 100644 --- a/python3/unittest/test_nbd_client_manager.py +++ b/python3/unittest/test_nbd_client_manager.py @@ -14,9 +14,6 @@ # mock modules to avoid dependencies sys.modules["XenAPI"] = MagicMock() -# pylint: disable=protected-access -# pylint: disable=missing-function-docstring -# pylint: disable=missing-class-docstring @patch('subprocess.Popen') class TestCallFunction(unittest.TestCase): From 81205e110a0b5a8d1b75b1010567123791383e47 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 15 Mar 2024 02:50:18 +0000 Subject: [PATCH 024/341] CP-47334: Change to a more readable exception name Signed-off-by: Stephen Cheng --- python3/libexec/nbd_client_manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index e251198c762..281c0ab3445 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -25,9 +25,9 @@ MAX_DEVICE_WAIT_MINUTES = 10 -class NotGetNbdNumber(Exception): +class InvalidNbdDevName(Exception): """ - The NBD device should be in this format: nbd{0-100} + The NBD device should be in this format: nbd{0-1000} If we cannot match this pattern, raise this exception """ @@ -168,7 +168,7 @@ def _get_persistent_connect_info_filename(device): """ matched = re.search("/dev/nbd([0-9]+)", device) if not matched: - raise NotGetNbdNumber("Can not get the nbd number") + raise InvalidNbdDevName("Can not get the nbd number") number = matched.group(1) return PERSISTENT_INFO_DIR + "/" + number From 2c6a0fc4a4e905e5db690f213b6d23413c0eb32c Mon Sep 17 00:00:00 2001 
From: Stephen Cheng Date: Fri, 15 Mar 2024 04:02:44 +0000 Subject: [PATCH 025/341] CP-47334: Raise exception if all nbd devices are connected Signed-off-by: Stephen Cheng --- python3/libexec/nbd_client_manager.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index 281c0ab3445..e30477316d8 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -134,10 +134,9 @@ def _find_unused_nbd_device(): nbd_device = "/dev/nbd{}".format(device_no) if not _is_nbd_device_connected(nbd_device=nbd_device): return nbd_device - # Actually `_is_nbd_device_connected` will raise an exception - # if no unused device - # Add this return for pylint check - return None + + # If there are 1000 nbd devices (unlikely) and all are connected + raise NbdDeviceNotFound(nbd_device) def _wait_for_nbd_device(nbd_device, connected): deadline = datetime.now() + timedelta(minutes=MAX_DEVICE_WAIT_MINUTES) From 7b7a1f0f5a62653f5b7dfe1704ddabe7d11c6e6f Mon Sep 17 00:00:00 2001 From: acefei Date: Mon, 25 Mar 2024 11:10:43 +0800 Subject: [PATCH 026/341] CP-48466 Fix ci warnings for usb_scan.py (#5511) Signed-off-by: Fei Su --- pyproject.toml | 3 +- python3/libexec/usb_scan.py | 2 +- python3/unittest/test_usb_scan.py | 87 ++++++++++++++----------------- 3 files changed, 42 insertions(+), 50 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e7b587a89e2..32bd0ad84d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,14 +40,13 @@ disable = [ "missing-module-docstring", "consider-using-f-string", "too-many-branches", + "too-many-arguments", "broad-exception-caught", "no-else-break", "no-else-return", "invalid-name", "import-error", "unnecessary-pass", - "unspecified-encoding", - "protected-access", ] [tool.mypy] diff --git a/python3/libexec/usb_scan.py b/python3/libexec/usb_scan.py index 187418741e6..e940aa626f5 100755 --- 
a/python3/libexec/usb_scan.py +++ b/python3/libexec/usb_scan.py @@ -374,7 +374,7 @@ def __init__(self): """ self.rule_list = [] try: - with open(self._PATH) as f: + with open(self._PATH, encoding="utf-8", errors="backslashreplace") as f: log.debug("=== policy file begin") for line in f: log.debug(line[0:-1]) diff --git a/python3/unittest/test_usb_scan.py b/python3/unittest/test_usb_scan.py index 150cc16afba..d87f9b12b27 100644 --- a/python3/unittest/test_usb_scan.py +++ b/python3/unittest/test_usb_scan.py @@ -12,12 +12,6 @@ import mock from import_file import get_module - -def nottest(obj): - obj.__test__ = False - return obj - - sys.modules["xcp"] = mock.Mock() sys.modules["xcp.logger"] = mock.Mock() sys.modules["pyudev"] = mock.Mock() @@ -82,24 +76,23 @@ def __init__(self, devices, interfaces): self.interfaces = interfaces def list_devices(self, **kwargs): - if kwargs.pop("subsystem") == "usb": - dev_type = kwargs.pop("DEVTYPE") - if dev_type == "usb_device": - return MocEnumerator(self.devices) - elif dev_type == "usb_interface": - return MocEnumerator(self.interfaces) - return MocEnumerator([]) + assert kwargs.pop("subsystem") == "usb" + dev_type = kwargs.pop("DEVTYPE") + if dev_type == "usb_device": + return MocEnumerator(self.devices) + elif dev_type == "usb_interface": + return MocEnumerator(self.interfaces) def mock_setup(mod, devices, interfaces, path): - mod.log.error = test_log - mod.log.debug = test_log + mod.log.error = verify_log + mod.log.debug = verify_log mod.Policy._PATH = path - mod.pyudev.Context = mock.Mock(return_value=MocContext(devices, interfaces)) + mod.pyudev.Context = mock.Mock( + return_value=MocContext(devices, interfaces)) -@nottest -def test_log(m): +def verify_log(_): pass @@ -110,9 +103,11 @@ def setUp(self): def tearDown(self): shutil.rmtree(self.work_dir, ignore_errors=True) - @nottest - def test_usb_common( - self, moc_devices, moc_interfaces, moc_results, path="./scripts/usb-policy.conf" + def verify_usb_common( + self, 
moc_devices, + moc_interfaces, + moc_results, + path="./scripts/usb-policy.conf" ): usb_scan = get_module("usb_scan", "../libexec/usb_scan.py") @@ -128,14 +123,13 @@ def test_usb_common( # pass pusbs in json to XAPI self.assertEqual(sorted(pusbs), sorted(moc_results)) - @nottest - def test_usb_exit( + def verify_usb_exit( self, devices, interfaces, results, path="./scripts/usb-policy.conf", msg="" - ): # pylint: disable=too-many-arguments + ): with self.assertRaises(SystemExit) as cm: - self.test_usb_common(devices, interfaces, results, path) + self.verify_usb_common(devices, interfaces, results, path) if msg: # cm.exception.code is int type whose format # looks like "duplicated tag'vid' found, @@ -183,7 +177,7 @@ def test_usb_dongle(self): "speed": "480", } ] - self.test_usb_common(devices, interfaces, results) + self.verify_usb_common(devices, interfaces, results) def test_usb_dongle_on_hub(self): devices = [ @@ -226,7 +220,7 @@ def test_usb_dongle_on_hub(self): "speed": "12", } ] - self.test_usb_common(devices, interfaces, results) + self.verify_usb_common(devices, interfaces, results) def test_usb_dongle_unbinded(self): devices = [ @@ -246,7 +240,7 @@ def test_usb_dongle_unbinded(self): ] interfaces = [] results = [] - self.test_usb_common(devices, interfaces, results) + self.verify_usb_common(devices, interfaces, results) def test_usb_keyboard(self): devices = [ @@ -285,83 +279,82 @@ def test_usb_keyboard(self): }, ] results = [] - self.test_usb_common(devices, interfaces, results) + self.verify_usb_common(devices, interfaces, results) def test_usb_config_missing(self): - self.test_usb_exit([], [], [], "not_exist.conf") + self.verify_usb_exit([], [], [], "not_exist.conf") - @nottest - def test_usb_config_error_common(self, content, msg): + def verify_usb_config_error_common(self, content, msg): path = os.path.join(self.work_dir, "usb-policy.conf") with open(path, "w") as f: f.write(content) - self.test_usb_exit([], [], [], path, msg) + self.verify_usb_exit([], 
[], [], path, msg) def test_usb_config_error_unexpected_chars_with_comment(self): content = """ss# unexpected words with comment ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "to unpack") + self.verify_usb_config_error_common(content, "to unpack") def test_usb_config_error_duplicated_key(self): content = """# duplicated key word ALLOW:vid=056a vid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "duplicated tag") + self.verify_usb_config_error_common(content, "duplicated tag") def test_usb_config_error_invalid_key(self): content = """# invalid key word ALLOW:vid=056a psid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed policy rule, " - "unable to parse") + self.verify_usb_config_error_common( + content, "Malformed policy rule, unable to parse") def test_usb_config_error_hex_length_4(self): content = """# hex length not 4 ALLOW:vid=056a pid=031 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "length error") + self.verify_usb_config_error_common(content, "length error") def test_usb_config_error_hex_length_2(self): content = """# hex length not 2 DENY:vid=056a pid=0314 class=035 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "length error") + self.verify_usb_config_error_common(content, "length error") def test_usb_config_error_action_key(self): content = """# wrong action key word ALLOWED:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed action") + self.verify_usb_config_error_common(content, "Malformed action") def test_usb_config_error_unexpected_chars_end(self): content 
= """# unexpected words in the end ALLOW:vid=056a pid=0314 class=03 kk # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed policy rule, " - "unable to parse") + self.verify_usb_config_error_common( + content, "Malformed policy rule, unable to parse") def test_usb_config_error_unexpected_chars_beg(self): content = """# unexpected words at the beginning ii ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed action") + self.verify_usb_config_error_common(content, "Malformed action") def test_usb_config_error_unexpected_chars_mid(self): content = """# unexpected words in the middle ALLOW:vid=056a pid=0314 jj class=03 # Wacom Intuos tablet ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "Malformed policy rule, " - "unable to parse") + self.verify_usb_config_error_common( + content, "Malformed policy rule, unable to parse") def test_usb_config_error_unexpected_non_empty_line(self): content = """# unexpected non empty line @@ -369,11 +362,11 @@ def test_usb_config_error_unexpected_non_empty_line(self): aa ALLOW: # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "to unpack") + self.verify_usb_config_error_common(content, "to unpack") def test_usb_config_error_missing_colon(self): content = """# missing colon after action ALLOW:vid=056a pid=0314 class=03 # Wacom Intuos tablet ALLOW # Otherwise allow everything else """ - self.test_usb_config_error_common(content, "to unpack") + self.verify_usb_config_error_common(content, "to unpack") From acdab6bc723c547c8ab92d32b686764c7b213fb5 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 12 Mar 2024 07:26:49 +0000 Subject: [PATCH 027/341] CP-47653: Move perfmon.py to python3/bin Signed-off-by: Stephen Cheng --- python3/Makefile | 6 +++++- {scripts => python3/bin}/perfmon | 0 
scripts/Makefile | 1 - 3 files changed, 5 insertions(+), 2 deletions(-) rename {scripts => python3/bin}/perfmon (100%) diff --git a/python3/Makefile b/python3/Makefile index e85e199f705..26e2bdfa943 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -10,7 +10,11 @@ install: mkdir -p $(DESTDIR)$(SITE3_DIR) mkdir -p $(DESTDIR)$(LIBEXECDIR) + $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ + $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) - $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) + + $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/perfmon b/python3/bin/perfmon similarity index 100% rename from scripts/perfmon rename to python3/bin/perfmon diff --git a/scripts/Makefile b/scripts/Makefile index 51dc1b092f6..6a850199ba6 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -126,7 +126,6 @@ install: $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)/etc/sysconfig $(IPROG) sysconfig-perfmon $(DESTDIR)/etc/sysconfig/perfmon - $(IPROG) perfmon $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) From ce50f50b71312fd297be65bba78f9fb7031422b7 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 12 Mar 2024 08:20:28 +0000 Subject: [PATCH 028/341] CP-47653: Format the script using black and isort tools Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 961 ++++++++++++++++++++++++++++---------------- 1 file changed, 619 insertions(+), 342 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index a84c8eb5d61..3b68b485aa9 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -1,13 +1,13 @@ #!/usr/bin/env python # -# perfmon - a daemon for monitoring performance of the host on which it is run -# and of all the local VMs, and for generating 
events based on configurable +# perfmon - a daemon for monitoring performance of the host on which it is run +# and of all the local VMs, and for generating events based on configurable # triggers # # Notes: # ====== # The XAPI instance running on localhost monitors a number of variables -# for each VM running locally (i.e not on other pool members) and +# for each VM running locally (i.e not on other pool members) and # for the host itself. Each variable is stored in 16 RRDs (Round Robin Databases). # # Consolidation Number of samples in RRD @@ -17,7 +17,7 @@ # MAX 120 (10m) 120 (2h) ? ? # LAST 120 (10m) 120 (2h) ? ? # -# The "Consolidation function" tells how that RRD is built up from the +# The "Consolidation function" tells how that RRD is built up from the # one with the next highest sample rate. E.g. In the 1m/sample "AVERAGE" RRD # each sample is the average of 12 from the 1s/sample "AVERAGE" RRD, whereas # in the 1m/sample "MIN" RRD each sample is the minimum of 12 from the 1s/sample @@ -30,39 +30,47 @@ # The "cf" CGI param specfies the row. (All rows are returned if it's missing.) from __future__ import print_function -import sys -import os + +import commands +import gc import getopt +import os +import random +import re +import signal +import socket +import sys +import syslog +import time import traceback -import XenAPI import urllib -from xml import sax # used to parse rrd_updates because this may be large and sax is more efficient -from xml.dom import minidom # used to parse other-config:perfmon. Efficiency is less important than reliability here +# used to parse rrd_updates because this may be large and sax is more efficient +from xml import sax +# used to parse other-config:perfmon. 
Efficiency is less important than reliability here +from xml.dom import minidom from xml.parsers.expat import ExpatError -import time -import re -import random -import syslog -import socket -import gc -import signal -import commands + +import XenAPI + def print_debug(string): - if debug: + if debug: print("DEBUG:", string, file=sys.stderr) syslog.syslog(syslog.LOG_USER | syslog.LOG_INFO, "PERFMON(DEBUG): %s" % string) + def log_err(string): print(string, file=sys.stderr) syslog.syslog(syslog.LOG_USER | syslog.LOG_ERR, "PERFMON: %s" % string) pass + def log_info(string): print(string, file=sys.stderr) syslog.syslog(syslog.LOG_INFO | syslog.LOG_INFO, "PERFMON: %s" % string) pass + def debug_mem(): objCount = {} gc.collect() @@ -76,80 +84,100 @@ def debug_mem(): objCount[name] += 1 else: objCount[name] = 1 - + output = [] for name in objCount: output.append("%s :%s" % (name, objCount[name])) log_info("\n".join(output)) + class PerfMonException(Exception): pass + class XmlConfigException(PerfMonException): pass + class UsageException(Exception): pass # Start a session with the master of a pool. # Note: when calling http://localhost/rrd_update we must pass the session -# ID as a param. The host then uses this to verify our validity with +# ID as a param. The host then uses this to verify our validity with # the master before responding. 
# If the verification fails we should get a 401 response class XapiSession(XenAPI.Session): - """ Object that represents a XenAPI session with the pool master - One of these is needed to refresh a VMMonitor or HOSTMonitor config, or + """Object that represents a XenAPI session with the pool master + One of these is needed to refresh a VMMonitor or HOSTMonitor config, or to refresh an RRDUpdates object """ + def __init__(self): - XenAPI.Session.__init__(self, "http://_var_xapi_xapi", transport=XenAPI.UDSTransport()) + XenAPI.Session.__init__( + self, "http://_var_xapi_xapi", transport=XenAPI.UDSTransport() + ) self.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-perfmon") - def __del__ (self): + + def __del__(self): self.xenapi.session.logout() + def id(self): return self._session + class ObjectReport: def __init__(self, objtype, uuid): - self.objtype = objtype # a string like "vm", or "host" taken from an tag - self.uuid = uuid # the object's uuid - self.vars = {} # maps rrd variable name to array of floats + self.objtype = ( + objtype # a string like "vm", or "host" taken from an tag + ) + self.uuid = uuid # the object's uuid + self.vars = {} # maps rrd variable name to array of floats + def get_uuid(self): return self.uuid + def get_var_names(self): return self.vars.keys() + def get_value(self, var_name, row): try: return (self.vars[var_name])[row] except: return 0.0 + def insert_value(self, var_name, index, value): if var_name not in self.vars: self.vars[var_name] = [] self.vars[var_name].insert(index, value) + class RRDReport: "This is just a data structure passed that is completed by RRDContentHandler" + def __init__(self): self.reset() - + def reset(self): - self.columns = 0 # num xapi vars in xml - self.rows = 0 # num samples in xml - self.start_time = 0 # timestamp of 1st sample in xml - self.end_time = 0 # timestamp of last sample in xml - self.step_time = 0 # seconds between each pair of samples - self.obj_reports = {} # maps uuids to 
ObjectReports, built from xml + self.columns = 0 # num xapi vars in xml + self.rows = 0 # num samples in xml + self.start_time = 0 # timestamp of 1st sample in xml + self.end_time = 0 # timestamp of last sample in xml + self.step_time = 0 # seconds between each pair of samples + self.obj_reports = {} # maps uuids to ObjectReports, built from xml + class RRDColumn: "class used internally by RRDContentHandler" + def __init__(self, paramname, obj_report): self.paramname = paramname self.obj_report = obj_report + class RRDContentHandler(sax.ContentHandler): - """ Handles data in this format: + """Handles data in this format: INTEGER @@ -177,6 +205,7 @@ class RRDContentHandler(sax.ContentHandler): """ + def __init__(self, report): "report is saved and later updated by this object. report should contain defaults already" self.report = report @@ -190,63 +219,66 @@ class RRDContentHandler(sax.ContentHandler): self.in_row_tag = False self.column_details = [] self.row = 0 - + def startElement(self, name, attrs): self.raw_text = "" - if name == 'start': + if name == "start": self.in_start_tag = True - elif name == 'step': + elif name == "step": self.in_step_tag = True - elif name == 'end': + elif name == "end": self.in_end_tag = True - elif name == 'rows': + elif name == "rows": self.in_rows_tag = True - elif name == 'columns': + elif name == "columns": self.in_columns_tag = True - elif name == 'entry': + elif name == "entry": self.in_entry_tag = True - elif name == 'row': + elif name == "row": self.in_row_tag = True self.col = 0 if self.in_row_tag: - if name == 't': + if name == "t": self.in_t_tag = True - elif name == 'v': + elif name == "v": self.in_v_tag = True - + def characters(self, chars): - if (self.in_start_tag or - self.in_step_tag or - self.in_end_tag or - self.in_rows_tag or - self.in_columns_tag or - self.in_entry_tag or - #self.in_row_tag # ignore text under row tag, s are just for holding and nodes - self.in_t_tag or - self.in_v_tag): + if ( + 
self.in_start_tag + or self.in_step_tag + or self.in_end_tag + or self.in_rows_tag + or self.in_columns_tag + or self.in_entry_tag + or + # self.in_row_tag # ignore text under row tag, s are just for holding and nodes + self.in_t_tag + or self.in_v_tag + ): self.raw_text += chars - + def endElement(self, name): - if name == 'start': + if name == "start": # This overwritten later if there are any rows self.report.start_time = int(self.raw_text) self.in_start_tag = False - elif name == 'step': + elif name == "step": self.report.step_time = int(self.raw_text) self.in_step_tag = False - elif name == 'end': + elif name == "end": # This overwritten later if there are any rows self.report.end_time = int(self.raw_text) self.in_end_tag = False - elif name == 'rows': + elif name == "rows": self.report.rows = int(self.raw_text) self.in_rows_tag = False - elif name == 'columns': + elif name == "columns": self.report.columns = int(self.raw_text) self.in_columns_tag = False - elif name == 'entry': - (_, objtype, uuid, paramname) = self.raw_text.split(':') + elif name == "entry": + (_, objtype, uuid, paramname) = self.raw_text.split(":") # lookup the obj_report corresponding to this uuid, or create if it does not exist if uuid not in self.report.obj_reports: self.report.obj_reports[uuid] = ObjectReport(objtype, uuid) @@ -255,10 +287,10 @@ class RRDContentHandler(sax.ContentHandler): # save the details of this column self.column_details.append(RRDColumn(paramname, obj_report)) self.in_entry_tag = False - elif name == 'row': + elif name == "row": self.in_row_tag = False self.row += 1 - elif name == 't': + elif name == "t": # Extract start and end time from row data as it's more reliable than the values in the meta data t = int(self.raw_text) # Last row corresponds to start time @@ -269,50 +301,53 @@ class RRDContentHandler(sax.ContentHandler): self.in_t_tag = False - elif name == 'v': + elif name == "v": v = float(self.raw_text) # Find object report and paramname for this col 
col_details = self.column_details[self.col] obj_report = col_details.obj_report paramname = col_details.paramname - + # Update object_report - obj_report.insert_value(paramname, index=0, value=v) # use index=0 as this is the earliest sample so far - + obj_report.insert_value( + paramname, index=0, value=v + ) # use index=0 as this is the earliest sample so far + # Update position in row self.col += 1 - + self.in_t_tag = False - - + # An object of this class should persist the lifetime of the program class RRDUpdates: - """ Object used to get and parse the output the http://localhost/rrd_udpates?... - """ + """Object used to get and parse the output the http://localhost/rrd_udpates?...""" + def __init__(self): # params are what get passed to the CGI executable in the URL self.params = dict() - self.params['start'] = int(time.time()) - interval # interval seconds ago - self.params['host'] = 'true' # include data for host (as well as for VMs) - self.params['sr_uuid'] = 'all' # include data for all SRs attached to this host - self.params['cf'] = 'AVERAGE' # consolidation function, each sample averages 12 from the 5 second RRD - self.params['interval'] = str(rrd_step) # distinct from the perfmon interval - self.report = RRDReport() # data structure updated by RRDContentHandler + self.params["start"] = int(time.time()) - interval # interval seconds ago + self.params["host"] = "true" # include data for host (as well as for VMs) + self.params["sr_uuid"] = "all" # include data for all SRs attached to this host + self.params["cf"] = ( + "AVERAGE" # consolidation function, each sample averages 12 from the 5 second RRD + ) + self.params["interval"] = str(rrd_step) # distinct from the perfmon interval + self.report = RRDReport() # data structure updated by RRDContentHandler def __repr__(self): - return '' % str(self.params) - - def refresh(self, session, override_params = {}): + return "" % str(self.params) + + def refresh(self, session, override_params={}): "reread the 
rrd_updates over CGI and parse" params = override_params - params['session_id'] = session.id() + params["session_id"] = session.id() params.update(self.params) - paramstr = "&".join(["%s=%s" % (k,params[k]) for k in params]) + paramstr = "&".join(["%s=%s" % (k, params[k]) for k in params]) print_debug("Calling http://localhost/rrd_updates?%s" % paramstr) - # this is better than urllib.urlopen() as it raises an Exception on http 401 'Unauthorised' error + # this is better than urllib.urlopen() as it raises an Exception on http 401 'Unauthorised' error # rather than drop into interactive mode sock = urllib.URLopener().open("http://localhost/rrd_updates?%s" % paramstr) xmlsource = sock.read() @@ -323,10 +358,14 @@ class RRDUpdates: sax.parseString(xmlsource, RRDContentHandler(self.report)) # Update the time used on the next run - self.params['start'] = self.report.end_time + 1 # avoid retrieving same data twice + self.params["start"] = ( + self.report.end_time + 1 + ) # avoid retrieving same data twice - print_debug("Refreshed rrd_updates, start = %d, end = %d, rows = %d" % \ - (self.report.start_time, self.report.end_time, self.report.rows)) + print_debug( + "Refreshed rrd_updates, start = %d, end = %d, rows = %d" + % (self.report.start_time, self.report.end_time, self.report.rows) + ) def get_num_rows(self): "Return the number of samples of each parameter" @@ -338,102 +377,138 @@ class RRDUpdates: return self.report.obj_reports[uuid] except: return None - + def get_uuid_list_by_objtype(self, objtype): "Return a list of uuids corresonding to the objects of this type for which we have ObjectReports" - return [ objrep.uuid - for objrep in self.report.obj_reports.values() - if objrep.objtype == objtype ] + return [ + objrep.uuid + for objrep in self.report.obj_reports.values() + if objrep.objtype == objtype + ] # Consolidation functions: -supported_consolidation_functions = [ 'sum', 'average', 'max', 'get_percent_fs_usage', 'get_percent_log_fs_usage', 
'get_percent_mem_usage', 'get_percent_sr_usage' ] +supported_consolidation_functions = [ + "sum", + "average", + "max", + "get_percent_fs_usage", + "get_percent_log_fs_usage", + "get_percent_mem_usage", + "get_percent_sr_usage", +] + def average(mylist): - return sum(mylist)/float(len(mylist)) + return sum(mylist) / float(len(mylist)) + def get_percent_log_fs_usage(ignored): "Get the percent usage of the host filesystem for logs partition. Input list is ignored and should be empty" - fs_output = commands.getoutput('df /etc/passwd') - log_fs_output = commands.getoutput('df /var/log') - fs_output = ' '.join(fs_output.splitlines()[1:]) - log_fs_output = ' '.join(log_fs_output.splitlines()[1:]) + fs_output = commands.getoutput("df /etc/passwd") + log_fs_output = commands.getoutput("df /var/log") + fs_output = " ".join(fs_output.splitlines()[1:]) + log_fs_output = " ".join(log_fs_output.splitlines()[1:]) # Get the percent usage only when there is a separate logs partition - if (fs_output.split()[0] != log_fs_output.split()[0]): + if fs_output.split()[0] != log_fs_output.split()[0]: percentage = log_fs_output.split()[4] # remove % character and convert to float - return float(percentage[0:-1])/100.0 + return float(percentage[0:-1]) / 100.0 else: - return float('NaN') + return float("NaN") + def get_percent_fs_usage(ignored): "Get the percent usage of the host filesystem. 
Input list is ignored and should be empty" # this file is on the filesystem of interest in both OEM and Retail - output = commands.getoutput('df /etc/passwd') - output = ' '.join(output.splitlines()[1:]) # remove header line and rewrap on single line + output = commands.getoutput("df /etc/passwd") + output = " ".join( + output.splitlines()[1:] + ) # remove header line and rewrap on single line percentage = output.split()[4] # remove % character and convert to float - return float(percentage[0:-1])/100.0 + return float(percentage[0:-1]) / 100.0 + def get_percent_mem_usage(ignored): "Get the percent usage of Dom0 memory/swap. Input list is ignored and should be empty" try: - memfd = open('/proc/meminfo', 'r') + memfd = open("/proc/meminfo", "r") memlist = memfd.readlines() memfd.close() - memdict = [ m.split(':', 1) for m in memlist ] - memdict = dict([(k.strip(), float(re.search('\d+', v.strip()).group(0))) for (k,v) in memdict]) + memdict = [m.split(":", 1) for m in memlist] + memdict = dict( + [ + (k.strip(), float(re.search("\d+", v.strip()).group(0))) + for (k, v) in memdict + ] + ) # We consider the sum of res memory and swap in use as the hard demand # of mem usage, it is bad if this number is beyond the physical mem, as # in such case swapping is obligatory rather than voluntary, hence # degrading the performance. 
We define the percentage metrics as # (res_mem + swap_in_use) / phy_mem, which could potentially go beyond # 100% (but is considered bad when it does) - mem_in_use = memdict['MemTotal'] - memdict['MemFree'] - memdict['Buffers'] - memdict['Cached'] - swap_in_use = memdict['SwapTotal'] - memdict['SwapFree'] - return float(mem_in_use + swap_in_use) / memdict['MemTotal'] + mem_in_use = ( + memdict["MemTotal"] + - memdict["MemFree"] + - memdict["Buffers"] + - memdict["Cached"] + ) + swap_in_use = memdict["SwapTotal"] - memdict["SwapFree"] + return float(mem_in_use + swap_in_use) / memdict["MemTotal"] except Exception as e: log_err("Error %s in get_percent_mem_usage, return 0.0 instead" % e) return 0.0 + def get_percent_sr_usage(mylist): """Get the percent usage of the SR. Input list should be exactly two items: [physical_utilisation, size]""" try: if len(mylist) != 2: - raise Exception("Incorrect number of values to consolidate: %d (exactly 2 values)" % len(mylist)) + raise Exception( + "Incorrect number of values to consolidate: %d (exactly 2 values)" + % len(mylist) + ) physical_utilisation, size = mylist[0:2] return float(physical_utilisation) / size except Exception as e: log_err("Error %s in get_percent_sr_usage, return 0.0 instead" % e) return 0.0 + class VariableConfig: """Object storing the configuration of a Variable - + Initialisation parameters: xmldoc = dom object representing the nodes in the ObjectMonitor config strings. 
See VMMonitor.__doc__ and HOSTMonitor.__doc__ - alarm_create_callback = + alarm_create_callback = callback called by Variable.update() to create and send an alarm - get_default_variable_config = + get_default_variable_config = a function that VariableConfig.__init__() uses to lookup default tag values by variable name """ + def __init__(self, xmldoc, alarm_create_callback, get_default_variable_config): - try: name = xmldoc.getElementsByTagName('name')[0].getAttribute('value') - except IndexError: raise XmlConfigException("variable missing 'name' tag") - def get_value(tag): + try: + name = xmldoc.getElementsByTagName("name")[0].getAttribute("value") + except IndexError: + raise XmlConfigException("variable missing 'name' tag") + + def get_value(tag): try: - return xmldoc.getElementsByTagName(tag)[0].getAttribute('value') + return xmldoc.getElementsByTagName(tag)[0].getAttribute("value") except: return get_default_variable_config(name, tag) - rrd_regex = get_value('rrd_regex') - consolidation_fn = get_value('consolidation_fn') - alarm_trigger_level = get_value('alarm_trigger_level') - alarm_trigger_period = get_value('alarm_trigger_period') - alarm_auto_inhibit_period = get_value('alarm_auto_inhibit_period') - alarm_trigger_sense = get_value('alarm_trigger_sense') - alarm_priority = get_value('alarm_priority') + + rrd_regex = get_value("rrd_regex") + consolidation_fn = get_value("consolidation_fn") + alarm_trigger_level = get_value("alarm_trigger_level") + alarm_trigger_period = get_value("alarm_trigger_period") + alarm_auto_inhibit_period = get_value("alarm_auto_inhibit_period") + alarm_trigger_sense = get_value("alarm_trigger_sense") + alarm_priority = get_value("alarm_priority") # Save xmldoc: we need this when creating the body of the alarms self.xmldoc = xmldoc @@ -442,54 +517,68 @@ class VariableConfig: try: self.rrd_regex = re.compile("^%s$" % rrd_regex) except: - raise XmlConfigException("variable %s: regex %s does not compile" % (name, rrd_regex)) + raise 
XmlConfigException( + "variable %s: regex %s does not compile" % (name, rrd_regex) + ) if consolidation_fn not in supported_consolidation_functions: - raise XmlConfigException("variable %s: consolidation function %s not supported" \ - % (name, consolidation_fn)) + raise XmlConfigException( + "variable %s: consolidation function %s not supported" + % (name, consolidation_fn) + ) self.consolidation_fn = eval(consolidation_fn) try: self.alarm_trigger_period = int(alarm_trigger_period) except: - raise XmlConfigException("variable %s: alarm_trigger_period %s not an int" % \ - (name, alarm_trigger_period)) + raise XmlConfigException( + "variable %s: alarm_trigger_period %s not an int" + % (name, alarm_trigger_period) + ) try: self.alarm_auto_inhibit_period = int(alarm_auto_inhibit_period) except: - raise XmlConfigException("variable %s: alarm_auto_inhibit_period %s not an int" % \ - (name, alarm_auto_inhibit_period)) + raise XmlConfigException( + "variable %s: alarm_auto_inhibit_period %s not an int" + % (name, alarm_auto_inhibit_period) + ) try: trigger_level = float(alarm_trigger_level) except: - raise XmlConfigException("variable %s: alarm_trigger_level %s not a float" % \ - (name, alarm_trigger_level)) + raise XmlConfigException( + "variable %s: alarm_trigger_level %s not a float" + % (name, alarm_trigger_level) + ) self.alarm_priority = alarm_priority - + if alarm_trigger_sense == "high": - self.test_level = lambda : (self.value > trigger_level) + self.test_level = lambda: (self.value > trigger_level) else: - self.test_level = lambda : (self.value < trigger_level) + self.test_level = lambda: (self.value < trigger_level) self.alarm_create_callback = alarm_create_callback + def variable_configs_differ(vc1, vc2): "Say whether configuration of one variable differs from that of another" return vc1.xmldoc.toxml() != vc2.xmldoc.toxml() + class VariableState: - """ Object storing the state of a Variable - """ + """Object storing the state of a Variable""" + def 
__init__(self): self.value = None self.timeof_last_alarm = time.time() - self.alarm_auto_inhibit_period self.trigger_down_counter = self.alarm_trigger_period + class Variable(VariableConfig, VariableState): - """ Variable() is used by ObjectMonitor to create one Variable object for each + """Variable() is used by ObjectMonitor to create one Variable object for each variable specified in it's config string """ + def __init__(self, *args): VariableConfig.__init__(self, *args) VariableState.__init__(self) @@ -497,32 +586,43 @@ class Variable(VariableConfig, VariableState): print_debug("Created Variable %s" % self.name) def set_active(self, active): - print_debug("set_active on %s. (old, new) = (%s, %s)" % (self.name, self.active, active)) + print_debug( + "set_active on %s. (old, new) = (%s, %s)" % (self.name, self.active, active) + ) if active == self.active: - return # nothing to do + return # nothing to do self.active = active if active: - VariableState.__init__(self) # reset when reactivating + VariableState.__init__(self) # reset when reactivating def __generate_alarm(self, session): - """ Generate an alarm using callback provided by creator - - ... provided that one has not been generated in the last + """Generate an alarm using callback provided by creator + + ... provided that one has not been generated in the last self.alarm_auto_inhibit_period seconds """ t = time.time() delta = t - self.timeof_last_alarm - print_debug("Time since last alarm for var %s is %d - %d = %d. Refractory period = %d." % (self.name, t, self.timeof_last_alarm, delta, self.alarm_auto_inhibit_period)) + print_debug( + "Time since last alarm for var %s is %d - %d = %d. Refractory period = %d." 
+ % ( + self.name, + t, + self.timeof_last_alarm, + delta, + self.alarm_auto_inhibit_period, + ) + ) if delta < self.alarm_auto_inhibit_period: - return # we are in the auto inhibit period - do nothing + return # we are in the auto inhibit period - do nothing self.timeof_last_alarm = t message = "value: %f\nconfig:\n%s" % (self.value, self.xmldoc.toprettyxml()) - + self.alarm_create_callback(self, session, message) def update(self, value, session): """Update the value of the variable using an RRDUpdates object - + Calls self.__generate_alarm() if level has been 'bad' for more than self.alarm_trigger_period seconds """ @@ -538,35 +638,40 @@ class Variable(VariableConfig, VariableState): else: # level good - reset trigger counter self.trigger_down_counter = self.alarm_trigger_period - + class ObjectMonitor: """Abstract class, used as base for VMMonitor and HOSTMonitor - + Public attributes are uuid, refresh_config() Inherited classes must implement a public attribute process_rrd_updates() """ + def __init__(self, uuid): - self.uuid = uuid + self.uuid = uuid self.xmlconfig = None # "variables" is the public attribute of interest self.variables = [] self.refresh_config() - + def refresh_config(self): if self.__update_xmlconfig(): # config has changed - reparse it try: self.__parse_xmlconfig() except XmlConfigException as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s config error: %s" % (self.monitortype, self.uuid, errmsg)) + errmsg = "\n".join([str(x) for x in e.args]) + log_err( + "%s %s config error: %s" % (self.monitortype, self.uuid, errmsg) + ) except ExpatError as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s XML parse error: %s" % (self.monitortype, self.uuid, errmsg)) + errmsg = "\n".join([str(x) for x in e.args]) + log_err( + "%s %s XML parse error: %s" % (self.monitortype, self.uuid, errmsg) + ) return True else: - return False # config unchanged + return False # config unchanged def __update_xmlconfig(self): if 
self.uuid not in all_xmlconfigs: @@ -578,16 +683,16 @@ class ObjectMonitor: self.xmlconfig = xmlconfig changed = True return changed - + def __parse_xmlconfig(self): if not self.xmlconfig: # Possible if this VM/host is not configured yet self.variables = [] return xmldoc = minidom.parseString(self.xmlconfig) - variable_nodes = xmldoc.getElementsByTagName('variable') + variable_nodes = xmldoc.getElementsByTagName("variable") variable_names = [] - + for vn in variable_nodes: # create a variable using the config in vn var = Variable(vn, self.alarm_create, self.get_default_variable_config) @@ -597,13 +702,16 @@ class ObjectMonitor: variable_names.append(var.name) # build list of variables already present with same name - vars_with_same_name = [ v for v in self.variables if v.name == var.name ] + vars_with_same_name = [v for v in self.variables if v.name == var.name] count = 0 append_var = True for v in vars_with_same_name: # this list should be 0 or 1 long! - if count > 0: - log_err("programmer error: found duplicate variable %s (uuid %s)" % (var.name, self.uuid)) + if count > 0: + log_err( + "programmer error: found duplicate variable %s (uuid %s)" + % (var.name, self.uuid) + ) self.variables.remove(v) continue count += 1 @@ -614,35 +722,46 @@ class ObjectMonitor: self.variables.remove(v) else: append_var = False - + if append_var: - print_debug("Appending %s to list of variables for %s UUID=%s" % (var.name, self.monitortype, self.uuid)) + print_debug( + "Appending %s to list of variables for %s UUID=%s" + % (var.name, self.monitortype, self.uuid) + ) self.variables.append(var) # Now delete any old variables that do not appear in the new variable_nodes - variables_to_remove = [ v for v in self.variables if v.name not in variable_names ] + variables_to_remove = [ + v for v in self.variables if v.name not in variable_names + ] for v in variables_to_remove: - print_debug("Deleting %s from list of variables for UUID=%s" % (v.name, self.uuid)) + print_debug( + "Deleting %s 
from list of variables for UUID=%s" % (v.name, self.uuid) + ) self.variables.remove(v) - def get_active_variables(self): return self.variables def process_rrd_updates(self, rrd_updates, session): - print_debug("%sMonitor processing rrd_updates for %s" % (self.monitortype, self.uuid)) + print_debug( + "%sMonitor processing rrd_updates for %s" % (self.monitortype, self.uuid) + ) obj_report = rrd_updates.get_obj_report_by_uuid(self.uuid) - num_rows = rrd_updates.get_num_rows() + num_rows = rrd_updates.get_num_rows() if not obj_report: return params_in_obj_report = obj_report.get_var_names() - + for var in self.get_active_variables(): # find the subset of the params returned for this object that we need to consolidate into var params_to_consolidate = filter(var.rrd_regex.match, params_in_obj_report) for row in range(num_rows): # Get the values to consolidate - values_to_consolidate = map(lambda param: obj_report.get_value(param, row), params_to_consolidate) + values_to_consolidate = map( + lambda param: obj_report.get_value(param, row), + params_to_consolidate, + ) # Consolidate them value = var.consolidation_fn(values_to_consolidate) # Pass result on to the variable object - this may result in an alarm being generated @@ -650,9 +769,15 @@ class ObjectMonitor: def alarm_create(self, var, session, message): "Callback used by Variable var to actually send an alarm" - print_debug("Creating an alarm for %s %s, message: %s" % (self.monitortype, self.uuid, message)) - session.xenapi.message.create("ALARM", var.alarm_priority, self.monitortype, self.uuid, message) - + print_debug( + "Creating an alarm for %s %s, message: %s" + % (self.monitortype, self.uuid, message) + ) + session.xenapi.message.create( + "ALARM", var.alarm_priority, self.monitortype, self.uuid, message + ) + + class VMMonitor(ObjectMonitor): """Object that maintains state of one VM @@ -674,6 +799,7 @@ class VMMonitor(ObjectMonitor): * rrd_regex matches the names of variables from (xe vm-data-sources-list 
uuid=$vmuuid) used to compute value (only has defaults for "cpu_usage", "network_usage", and "disk_usage") """ + def __init__(self, *args): self.monitortype = "VM" ObjectMonitor.__init__(self, *args) @@ -681,33 +807,66 @@ class VMMonitor(ObjectMonitor): def get_default_variable_config(self, variable_name, config_tag): "This allows user to not specify full set of tags for each variable in xml config" - if config_tag == 'consolidation_fn': - if variable_name == "cpu_usage": return 'average' - elif variable_name == "fs_usage": return 'get_percent_fs_usage' - elif variable_name == "log_fs_usage": return 'get_percent_log_fs_usage' - elif variable_name == "mem_usage": return 'get_percent_mem_usage' - else: return 'sum' - elif config_tag == 'rrd_regex': - if variable_name == "cpu_usage": return "cpu[0-9]+" - elif variable_name == "network_usage": return "vif_[0-9]+_[rt]x" - elif variable_name == "disk_usage": return "vbd_(xvd|hd)[a-z]+_(read|write)" - elif variable_name == "fs_usage": return "_$_DUMMY__" # match nothing - elif variable_name == "log_fs_usage": return "_$_DUMMY__" # match nothing - elif variable_name == "mem_usage": return "_$_DUMMY__" # match nothing - elif variable_name == "memory_internal_free": return variable_name - else:raise XmlConfigException("variable %s: no default rrd_regex - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_period': return '60' # 1 minute - elif config_tag == 'alarm_auto_inhibit_period': return '3600' # 1 hour - elif config_tag == 'alarm_trigger_level': - if variable_name == "fs_usage": return '0.9' # trigger when 90% full - elif variable_name == "log_fs_usage": return '0.9' # trigger when 90% full - elif variable_name == "mem_usage": return '0.95' # tigger when mem demanded is close to phy_mem - else:raise XmlConfigException("variable %s: no default alarm_trigger_level - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_sense': - if variable_name == "memory_internal_free": return 
"low" - else: return 'high' # trigger if *above* - elif config_tag == 'alarm_priority': return '3' # Service degradation level defined in PR-1455 - else:raise XmlConfigException("variable %s: no default available for tag %s" % (variable_name, config_tag)) + if config_tag == "consolidation_fn": + if variable_name == "cpu_usage": + return "average" + elif variable_name == "fs_usage": + return "get_percent_fs_usage" + elif variable_name == "log_fs_usage": + return "get_percent_log_fs_usage" + elif variable_name == "mem_usage": + return "get_percent_mem_usage" + else: + return "sum" + elif config_tag == "rrd_regex": + if variable_name == "cpu_usage": + return "cpu[0-9]+" + elif variable_name == "network_usage": + return "vif_[0-9]+_[rt]x" + elif variable_name == "disk_usage": + return "vbd_(xvd|hd)[a-z]+_(read|write)" + elif variable_name == "fs_usage": + return "_$_DUMMY__" # match nothing + elif variable_name == "log_fs_usage": + return "_$_DUMMY__" # match nothing + elif variable_name == "mem_usage": + return "_$_DUMMY__" # match nothing + elif variable_name == "memory_internal_free": + return variable_name + else: + raise XmlConfigException( + "variable %s: no default rrd_regex - please specify one" + % variable_name + ) + elif config_tag == "alarm_trigger_period": + return "60" # 1 minute + elif config_tag == "alarm_auto_inhibit_period": + return "3600" # 1 hour + elif config_tag == "alarm_trigger_level": + if variable_name == "fs_usage": + return "0.9" # trigger when 90% full + elif variable_name == "log_fs_usage": + return "0.9" # trigger when 90% full + elif variable_name == "mem_usage": + return "0.95" # tigger when mem demanded is close to phy_mem + else: + raise XmlConfigException( + "variable %s: no default alarm_trigger_level - please specify one" + % variable_name + ) + elif config_tag == "alarm_trigger_sense": + if variable_name == "memory_internal_free": + return "low" + else: + return "high" # trigger if *above* + elif config_tag == "alarm_priority": + 
return "3" # Service degradation level defined in PR-1455 + else: + raise XmlConfigException( + "variable %s: no default available for tag %s" + % (variable_name, config_tag) + ) + class SRMonitor(ObjectMonitor): """Object that maintains state of one SR @@ -730,6 +889,7 @@ class SRMonitor(ObjectMonitor): * rrd_regex matches the names of variables from (xe sr-data-sources-list uuid=$sruuid) used to compute value (has default for "physical_utilistaion") """ + def __init__(self, *args): self.monitortype = "SR" ObjectMonitor.__init__(self, *args) @@ -737,21 +897,43 @@ class SRMonitor(ObjectMonitor): def get_default_variable_config(self, variable_name, config_tag): "This allows user to not specify full set of tags for each variable in xml config" - if config_tag == 'consolidation_fn': - if variable_name == 'physical_utilisation': return 'get_percent_sr_usage' - else: return 'sum' - elif config_tag == 'rrd_regex': - if variable_name == 'physical_utilisation': return 'physical_utilisation|size' - elif variable_name == "sr_io_throughput_total_per_host": return '_$_DUMMY__' # (these are to drive Host RRDs and so are handled by the HOSTMonitor) - else:raise XmlConfigException("variable %s: no default rrd_regex - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_period': return '60' # 1 minute - elif config_tag == 'alarm_auto_inhibit_period': return '3600' # 1 hour - elif config_tag == 'alarm_trigger_level': - if variable_name == "physical_utilistaion": return '0.8' # trigger when 80% full - else:raise XmlConfigException("variable %s: no default alarm_trigger_level - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_sense': return 'high' # trigger if *above* - elif config_tag == 'alarm_priority': return '3' # Service degradation level defined in PR-1455 - else:raise XmlConfigException("variable %s: no default available for tag %s" % (variable_name, config_tag)) + if config_tag == "consolidation_fn": + if variable_name == 
"physical_utilisation": + return "get_percent_sr_usage" + else: + return "sum" + elif config_tag == "rrd_regex": + if variable_name == "physical_utilisation": + return "physical_utilisation|size" + elif variable_name == "sr_io_throughput_total_per_host": + return "_$_DUMMY__" # (these are to drive Host RRDs and so are handled by the HOSTMonitor) + else: + raise XmlConfigException( + "variable %s: no default rrd_regex - please specify one" + % variable_name + ) + elif config_tag == "alarm_trigger_period": + return "60" # 1 minute + elif config_tag == "alarm_auto_inhibit_period": + return "3600" # 1 hour + elif config_tag == "alarm_trigger_level": + if variable_name == "physical_utilistaion": + return "0.8" # trigger when 80% full + else: + raise XmlConfigException( + "variable %s: no default alarm_trigger_level - please specify one" + % variable_name + ) + elif config_tag == "alarm_trigger_sense": + return "high" # trigger if *above* + elif config_tag == "alarm_priority": + return "3" # Service degradation level defined in PR-1455 + else: + raise XmlConfigException( + "variable %s: no default available for tag %s" + % (variable_name, config_tag) + ) + class HOSTMonitor(ObjectMonitor): """Object that maintains state of one Host @@ -783,35 +965,58 @@ class HOSTMonitor(ObjectMonitor): This only works for that one specific variable-name, and rrd_regex must not be specified. Configuration done on the host directly (variable-name sr_io_throughput_total_xxxxxxxx) takes priority. 
""" + def __init__(self, *args): self.monitortype = "Host" self.secondary_variables = set() - self.secondary_xmlconfigs = {} # map of sr uuid to xml text + self.secondary_xmlconfigs = {} # map of sr uuid to xml text ObjectMonitor.__init__(self, *args) print_debug("Created HOSTMonitor with uuid %s" % self.uuid) def get_default_variable_config(self, variable_name, config_tag): "This allows user to not specify full set of tags for each variable in xml config" - if config_tag == 'consolidation_fn': - if variable_name == "cpu_usage": return 'average' - else: return 'sum' - elif config_tag == 'rrd_regex': - if variable_name == "cpu_usage": return "cpu[0-9]+" - elif variable_name == "network_usage": return "pif_eth[0-9]+_[rt]x" - elif variable_name == "memory_free_kib": return variable_name - elif re.match("sr_io_throughput_total_[0-9a-f]{8}$", variable_name): return variable_name[3:] - else:raise XmlConfigException("variable %s: no default rrd_regex - please specify one" % variable_name) - elif config_tag == 'alarm_trigger_period': return '60' # 1 minute - elif config_tag == 'alarm_auto_inhibit_period': return '3600' # 1 hour - elif config_tag == 'alarm_trigger_sense': - if variable_name == "memory_free_kib": return "low" - else: return 'high' # trigger if *above* level - elif config_tag == 'alarm_priority': return '3' # Service degradation level defined in PR-1455 - else:raise XmlConfigException("variable %s: no default available for tag %s" % (variable_name, config_tag)) + if config_tag == "consolidation_fn": + if variable_name == "cpu_usage": + return "average" + else: + return "sum" + elif config_tag == "rrd_regex": + if variable_name == "cpu_usage": + return "cpu[0-9]+" + elif variable_name == "network_usage": + return "pif_eth[0-9]+_[rt]x" + elif variable_name == "memory_free_kib": + return variable_name + elif re.match("sr_io_throughput_total_[0-9a-f]{8}$", variable_name): + return variable_name[3:] + else: + raise XmlConfigException( + "variable %s: no default 
rrd_regex - please specify one" + % variable_name + ) + elif config_tag == "alarm_trigger_period": + return "60" # 1 minute + elif config_tag == "alarm_auto_inhibit_period": + return "3600" # 1 hour + elif config_tag == "alarm_trigger_sense": + if variable_name == "memory_free_kib": + return "low" + else: + return "high" # trigger if *above* level + elif config_tag == "alarm_priority": + return "3" # Service degradation level defined in PR-1455 + else: + raise XmlConfigException( + "variable %s: no default available for tag %s" + % (variable_name, config_tag) + ) def get_active_variables(self): r = self.variables + [v for v in self.secondary_variables if v.active] - print_debug("Returning active variables: %d main, %d total" % (len(self.variables), len(r))) + print_debug( + "Returning active variables: %d main, %d total" + % (len(self.variables), len(r)) + ) return r def refresh_config(self): @@ -828,8 +1033,8 @@ class HOSTMonitor(ObjectMonitor): return secondary_changed = False - old_sruuids = set(self.secondary_xmlconfigs) # create set of keys - current_sruuids = sruuids_by_hostuuid[self.uuid] # a set already + old_sruuids = set(self.secondary_xmlconfigs) # create set of keys + current_sruuids = sruuids_by_hostuuid[self.uuid] # a set already if old_sruuids != current_sruuids: print_debug("Changed set of perfmon sruuids for host %s" % self.uuid) secondary_changed = True @@ -838,10 +1043,15 @@ class HOSTMonitor(ObjectMonitor): sr_xmlconfig = all_xmlconfigs[sruuid] # As an optimisation, if xml unchanged then do not re-parse. # Otherwise we would create Variables which would turn out to be same as existing ones so we would ignore them. 
- if sruuid in self.secondary_xmlconfigs and self.secondary_xmlconfigs[sruuid] == sr_xmlconfig: + if ( + sruuid in self.secondary_xmlconfigs + and self.secondary_xmlconfigs[sruuid] == sr_xmlconfig + ): print_debug("Unchanged sr_xmlconfig for sruuid %s" % sruuid) else: - print_debug("Found new/different sr_xmlconfig for sruuid %s" % sruuid) + print_debug( + "Found new/different sr_xmlconfig for sruuid %s" % sruuid + ) secondary_changed = True break @@ -849,11 +1059,17 @@ class HOSTMonitor(ObjectMonitor): try: self.__parse_secondary_xmlconfigs() except XmlConfigException as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s secondary config error: %s" % (self.monitortype, self.uuid, errmsg)) + errmsg = "\n".join([str(x) for x in e.args]) + log_err( + "%s %s secondary config error: %s" + % (self.monitortype, self.uuid, errmsg) + ) except ExpatError as e: - errmsg = "\n".join([ str(x) for x in e.args ]) - log_err("%s %s secondary XML parse error: %s" % (self.monitortype, self.uuid, errmsg)) + errmsg = "\n".join([str(x) for x in e.args]) + log_err( + "%s %s secondary XML parse error: %s" + % (self.monitortype, self.uuid, errmsg) + ) if main_changed or secondary_changed: # Calculate which secondary variables are active, i.e. not overridden by ones configured on the host rather than the SR. 
@@ -862,67 +1078,102 @@ class HOSTMonitor(ObjectMonitor): v.set_active(v.name not in main_names) def __parse_secondary_xmlconfigs(self): - variable_names = set() # Names of the Variable objects we create based on the xml nodes we find + variable_names = ( + set() + ) # Names of the Variable objects we create based on the xml nodes we find self.secondary_xmlconfigs.clear() for sruuid in sruuids_by_hostuuid[self.uuid]: print_debug("Looking for config on SR uuid %s" % sruuid) sr_xmlconfig = all_xmlconfigs[sruuid] self.secondary_xmlconfigs[sruuid] = sr_xmlconfig xmldoc = minidom.parseString(sr_xmlconfig) - variable_nodes = xmldoc.getElementsByTagName('variable') + variable_nodes = xmldoc.getElementsByTagName("variable") found = False for vn in variable_nodes: try: - name_element = vn.getElementsByTagName('name')[0] - name = name_element.getAttribute('value') + name_element = vn.getElementsByTagName("name")[0] + name = name_element.getAttribute("value") except IndexError: - log_err("variable missing 'name' tag in perfmon xml config of SR %s" % sruuid) - continue # perhaps other nodes are valid - print_debug("Found variable with name %s on SR uuid %s" % (name, sruuid)) - if name != 'sr_io_throughput_total_per_host': - continue # Do nothing unless the variable is meant for the host - if len(vn.getElementsByTagName('rrd_regex')) > 0: - log_err("Configuration error: rrd_regex must not be specified in config on SR meant for each host") - continue # perhaps another node is valid + log_err( + "variable missing 'name' tag in perfmon xml config of SR %s" + % sruuid + ) + continue # perhaps other nodes are valid + print_debug( + "Found variable with name %s on SR uuid %s" % (name, sruuid) + ) + if name != "sr_io_throughput_total_per_host": + continue # Do nothing unless the variable is meant for the host + if len(vn.getElementsByTagName("rrd_regex")) > 0: + log_err( + "Configuration error: rrd_regex must not be specified in config on SR meant for each host" + ) + continue # 
perhaps another node is valid if found: - log_err("Configuration error: duplicate variable %s on SR %s" % (name, sruuid)) + log_err( + "Configuration error: duplicate variable %s on SR %s" + % (name, sruuid) + ) # A host can only have one Variable from a given SR since we only accept one kind (one name). break found = True - name_override = 'sr_io_throughput_total_%s' % sruuid[0:8] - name_element.setAttribute('value', name_override) - provenance_element = xmldoc.createElement('configured_on') - provenance_element.setAttribute('class', 'SR') - provenance_element.setAttribute('uuid', sruuid) + name_override = "sr_io_throughput_total_%s" % sruuid[0:8] + name_element.setAttribute("value", name_override) + provenance_element = xmldoc.createElement("configured_on") + provenance_element.setAttribute("class", "SR") + provenance_element.setAttribute("uuid", sruuid) vn.appendChild(provenance_element) var = Variable(vn, self.alarm_create, self.get_default_variable_config) variable_names.add(var.name) append_var = True - vars_with_same_name = [ v for v in self.secondary_variables if v.name == var.name ] + vars_with_same_name = [ + v for v in self.secondary_variables if v.name == var.name + ] for v in vars_with_same_name: # this list should be 0 or 1 long! # only replace variable in self.secondary_variables if its config has changed. 
# This way we don't reset its state if variable_configs_differ(var, v): - print_debug("Removing existing secondary variable to replace with new: %s" % v.name) + print_debug( + "Removing existing secondary variable to replace with new: %s" + % v.name + ) self.secondary_variables.remove(v) else: - print_debug("Found existing secondary variable with same config: %s" % v.name) + print_debug( + "Found existing secondary variable with same config: %s" + % v.name + ) append_var = False if append_var: - print_debug("Adding %s to set of secondary variables for host UUID=%s" % (var.name, self.uuid)) + print_debug( + "Adding %s to set of secondary variables for host UUID=%s" + % (var.name, self.uuid) + ) self.secondary_variables.add(var) # Now that we have read all the xml items, # delete any old variables that do not appear in the new variable_nodes - print_debug("Going to delete any secondary_variables not in %s" % variable_names) - variables_to_remove = [ v for v in self.secondary_variables if v.name not in variable_names ] + print_debug( + "Going to delete any secondary_variables not in %s" % variable_names + ) + variables_to_remove = [ + v for v in self.secondary_variables if v.name not in variable_names + ] for v in variables_to_remove: - print_debug("Deleting %s from set of secondary variables for UUID=%s" % (v.name, self.uuid)) + print_debug( + "Deleting %s from set of secondary variables for UUID=%s" + % (v.name, self.uuid) + ) self.secondary_variables.remove(v) + all_xmlconfigs = {} -sruuids_by_hostuuid = {} # Maps host uuid to a set of the uuids of the host's SRs that have other-config:perfmon +sruuids_by_hostuuid = ( + {} +) # Maps host uuid to a set of the uuids of the host's SRs that have other-config:perfmon + + def update_all_xmlconfigs(session): """Update all_xmlconfigs, a global dictionary that maps any uuid (SR, host or VM) to the xml config string in other-config:perfmon keys @@ -930,42 +1181,44 @@ def update_all_xmlconfigs(session): lookup of the 
other-config:perfmon xml of the SRs connected to a host""" global all_xmlconfigs global sruuids_by_hostuuid - + all_host_recs = session.xenapi.host.get_all_records() - all_vm_recs = session.xenapi.VM.get_all_records() - all_sr_recs = session.xenapi.SR.get_all_records() + all_vm_recs = session.xenapi.VM.get_all_records() + all_sr_recs = session.xenapi.SR.get_all_records() # build dictionary mapping uuids to other_configs all_otherconfigs = {} for recs in (all_host_recs, all_vm_recs, all_sr_recs): - all_otherconfigs.update([ - (recs[ref]['uuid'], recs[ref]['other_config']) - for ref in recs.keys() - ]) + all_otherconfigs.update( + [(recs[ref]["uuid"], recs[ref]["other_config"]) for ref in recs.keys()] + ) # rebuild dictionary mapping uuids to xmlconfigs all_xmlconfigs.clear() - all_xmlconfigs.update([ - (uuid, other_config['perfmon']) + all_xmlconfigs.update( + [ + (uuid, other_config["perfmon"]) for (uuid, other_config) in all_otherconfigs.items() - if 'perfmon' in other_config - ]) + if "perfmon" in other_config + ] + ) # Rebuild another map sruuids_by_hostuuid.clear() - for (sr, rec) in all_sr_recs.items(): - if 'perfmon' in rec['other_config']: - sruuid = rec['uuid'] + for sr, rec in all_sr_recs.items(): + if "perfmon" in rec["other_config"]: + sruuid = rec["uuid"] # If we hadn't done SR.get_all_records we would now do SR.get_PBDs. 
- host_refs = [session.xenapi.PBD.get_host(pbd) for pbd in rec['PBDs']] - host_uuids = [all_host_recs[ref]['uuid'] for ref in host_refs] + host_refs = [session.xenapi.PBD.get_host(pbd) for pbd in rec["PBDs"]] + host_uuids = [all_host_recs[ref]["uuid"] for ref in host_refs] for hu in host_uuids: if hu in sruuids_by_hostuuid: sruuids_by_hostuuid[hu].add(sruuid) else: sruuids_by_hostuuid[hu] = {sruuid} + # 5 minute default interval interval = 300 interval_percent_dither = 5 @@ -978,35 +1231,46 @@ config_update_period = 1800 cmdsockname = "\0perfmon" # an af_unix socket name (the "\0" stops socket.bind() creating a fs node) cmdmaxlen = 256 + def main(): global interval global interval_percent_dither global rrd_step global debug global config_update_period - maxruns=None + maxruns = None try: argv = sys.argv[1:] - opts, args = getopt.getopt(argv, "i:n:ds:c:D:", - ["interval=", "numloops=","debug","rrdstep=","config_update_period=","interval_percent_dither="]) + opts, args = getopt.getopt( + argv, + "i:n:ds:c:D:", + [ + "interval=", + "numloops=", + "debug", + "rrdstep=", + "config_update_period=", + "interval_percent_dither=", + ], + ) except getopt.GetoptError: raise UsageException - + configfname = None for opt, arg in opts: - if opt == '-i' or opt == '--interval': + if opt == "-i" or opt == "--interval": interval = int(arg) - elif opt == '-n' or opt == '--numloops': + elif opt == "-n" or opt == "--numloops": maxruns = int(arg) - elif opt == '-d' or opt == '--debug': + elif opt == "-d" or opt == "--debug": debug = True - elif opt == '-s' or opt == '--rrdstep': + elif opt == "-s" or opt == "--rrdstep": rrd_step = int(arg) if rrd_step != 5 and rrd_step != 60: raise UsageException - elif opt == '-c' or opt == '--config_update_period': + elif opt == "-c" or opt == "--config_update_period": config_update_period = int(arg) - elif opt == '-D' or opt == '--interval_percent_dither': + elif opt == "-D" or opt == "--interval_percent_dither": interval_percent_dither = int(arg) 
else: raise UsageException @@ -1015,23 +1279,22 @@ def main(): cmdsock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) cmdsock.bind(cmdsockname) - # The dither on each loop (prevents stampede on master) rand = random.Random().uniform - dither = (interval * interval_percent_dither)/100.0 + dither = (interval * interval_percent_dither) / 100.0 # Create a XAPI session on first run restart_session = True # Create a client for getting the rrd_updates over HTTP - rrd_updates = RRDUpdates() + rrd_updates = RRDUpdates() - # Work out when next to update all the xmlconfigs for all the + # Work out when next to update all the xmlconfigs for all the # hosts and all the VMs. This causes a lot of data to be retrieved # from the master, so we only do it once every config_update_period # and we cache the results next_config_update = time.time() - + # monitors for vms running on this host. # This dictionary uses uuids to lookup each monitor object vm_mon_lookup = {} @@ -1042,7 +1305,7 @@ def main(): # The monitor for the host host_mon = None - + runs = 0 while True: print_debug("Run: %d" % runs) @@ -1055,24 +1318,24 @@ def main(): restart_session = False rrd_updates.refresh(session) - + # Should we update all_xmlconfigs if time.time() >= next_config_update: print_debug("Updating all_xmlconfigs") # yes - update all the xml configs: this generates a few LARGE xapi messages from the master update_all_xmlconfigs(session) - + # Set time when to do this next next_config_update = time.time() + config_update_period # List of VMs present in rrd_updates - vm_uuid_list = rrd_updates.get_uuid_list_by_objtype('vm') + vm_uuid_list = rrd_updates.get_uuid_list_by_objtype("vm") # Remove any monitors for VMs no longer listed in rrd_updates page for uuid in vm_mon_lookup.keys(): if uuid not in vm_uuid_list: vm_mon_lookup.pop(uuid) - + # Create monitors for VMs that have just appeared in rrd_updates page for uuid in vm_uuid_list: if uuid not in vm_mon_lookup.keys(): @@ -1080,11 +1343,13 @@ def 
main(): else: # check if the config has changed, e.g. by XenCenter vm_mon_lookup[uuid].refresh_config() - + # Remove monitor for the host if it's no longer listed in rrd_updates page # Create monitor for the host if it has just appeared in rrd_updates page try: - host_uuid = rrd_updates.get_uuid_list_by_objtype('host')[0] # should only ever be one of these + host_uuid = rrd_updates.get_uuid_list_by_objtype("host")[ + 0 + ] # should only ever be one of these except: # list may be empty! host_uuid = None @@ -1094,14 +1359,16 @@ def main(): elif not host_mon: host_mon = HOSTMonitor(host_uuid) elif host_mon.uuid != host_uuid: - raise PerfMonException("host uuid in rrd_updates changed (old: %s, new %s)" % \ - (host_mon.uuid, host_uuid)) + raise PerfMonException( + "host uuid in rrd_updates changed (old: %s, new %s)" + % (host_mon.uuid, host_uuid) + ) else: # check if the config has changed, e.g. by XenCenter host_mon.refresh_config() # List of SRs present in rrd_updates - sr_uuid_list = rrd_updates.get_uuid_list_by_objtype('sr') + sr_uuid_list = rrd_updates.get_uuid_list_by_objtype("sr") print_debug("sr_uuid_list = %s" % sr_uuid_list) # Remove monitors for SRs no longer listed in the rrd_updates page @@ -1133,21 +1400,27 @@ def main(): time.sleep(2) pass - log_err("caught socket.error: (%s) - restarting XAPI session" % " ".join([str(x) for x in e.args])) + log_err( + "caught socket.error: (%s) - restarting XAPI session" + % " ".join([str(x) for x in e.args]) + ) restart_session = True except IOError as e: - if e.args[0] == 'http error' and e.args[1] in (401, 500): + if e.args[0] == "http error" and e.args[1] in (401, 500): # Error getting rrd_updates: 401=Unauthorised, 500=Internal - start new session - pass - elif e.args[0] == 'socket error': + pass + elif e.args[0] == "socket error": # This happens if we send messages or read other-config:perfmon after xapi is restarted pass else: # Don't know why we got this error - crash, die and look at logs later raise - 
log_err("caught IOError: (%s) - restarting XAPI session" % " ".join([str(x) for x in e.args])) + log_err( + "caught IOError: (%s) - restarting XAPI session" + % " ".join([str(x) for x in e.args]) + ) restart_session = True runs += 1 @@ -1178,26 +1451,28 @@ def main(): return 0 + def sigterm_handler(sig, stack_frame): log_err("Caught signal %d - exiting" % sig) sys.exit(1) + pidfile = "/var/run/perfmon.pid" if __name__ == "__main__": - + # setup signal handler to print out notice when killed signal.signal(signal.SIGTERM, sigterm_handler) - - if '--daemon' in sys.argv[1:]: - sys.argv.remove('--daemon') + + if "--daemon" in sys.argv[1:]: + sys.argv.remove("--daemon") if os.fork() != 0: sys.exit(0) os.setsid() - sys.stdout=open("/dev/null", 'w') - sys.stdin=open("/dev/null", 'r') - sys.stderr=sys.stdout - + sys.stdout = open("/dev/null", "w") + sys.stdin = open("/dev/null", "r") + sys.stderr = sys.stdout + # Exit if perfmon already running if os.path.exists(pidfile): pid = open(pidfile).read() @@ -1206,8 +1481,8 @@ if __name__ == "__main__": sys.exit(3) try: - # Write out pidfile - fd = open(pidfile,"w") + # Write out pidfile + fd = open(pidfile, "w") fd.write("%d" % os.getpid()) fd.close() @@ -1216,17 +1491,19 @@ if __name__ == "__main__": except UsageException as e: # Print the usage - log_err("usage: %s [-i -n -d -s -c -D ] \\\n" \ - "\t[--interval= --numloops= --debug \\\n" \ - "\t --rrdstep= --daemon]\n" \ - "\t --config_update_period=\n" \ - "\t --interval_percent_dither=\n" \ - " interval:\tseconds between reads of http://localhost/rrd_updates?...\n" \ - " loops:\tnumber of times to run before exiting\n" \ - " rrd_step:\tseconds between samples provided by rrd_updates. 
Valid values are 5 or 60\n" \ - " config_update_period:\tseconds between getting updates of all VM/host records from master\n" \ - " interval_percent_dither:\tmax percent dither in each loop - prevents stampede on master\n" \ - % (sys.argv[0])) + log_err( + "usage: %s [-i -n -d -s -c -D ] \\\n" + "\t[--interval= --numloops= --debug \\\n" + "\t --rrdstep= --daemon]\n" + "\t --config_update_period=\n" + "\t --interval_percent_dither=\n" + " interval:\tseconds between reads of http://localhost/rrd_updates?...\n" + " loops:\tnumber of times to run before exiting\n" + " rrd_step:\tseconds between samples provided by rrd_updates. Valid values are 5 or 60\n" + " config_update_period:\tseconds between getting updates of all VM/host records from master\n" + " interval_percent_dither:\tmax percent dither in each loop - prevents stampede on master\n" + % (sys.argv[0]) + ) rc = 1 except SystemExit: @@ -1239,16 +1516,16 @@ if __name__ == "__main__": log_err("Exception is of class %s" % e.__class__) ex = sys.exc_info() err = traceback.format_exception(*ex) - + # Python built-in Exception has args, # but XenAPI.Failure has details instead. Sigh. 
try: - errmsg = "\n".join([ str(x) for x in e.args ]) + errmsg = "\n".join([str(x) for x in e.args]) # print the exception args nicely log_err(errmsg) except Exception as ignored: try: - errmsg = "\n".join([ str(x) for x in e.details ]) + errmsg = "\n".join([str(x) for x in e.details]) # print the exception args nicely log_err(errmsg) except Exception as ignored: From d834700401e8765f5948f2312f03f3455933ceaa Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 12 Mar 2024 08:33:08 +0000 Subject: [PATCH 029/341] CP-47653: py2->py3 for perfmon Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 46 +++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 3b68b485aa9..1c2f3c3f95a 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # perfmon - a daemon for monitoring performance of the host on which it is run # and of all the local VMs, and for generating events based on configurable @@ -29,9 +29,8 @@ # # The "cf" CGI param specfies the row. (All rows are returned if it's missing.) -from __future__ import print_function -import commands +import subprocess import gc import getopt import os @@ -43,9 +42,11 @@ import sys import syslog import time import traceback -import urllib +import urllib.request + # used to parse rrd_updates because this may be large and sax is more efficient from xml import sax + # used to parse other-config:perfmon. 
Efficiency is less important than reliability here from xml.dom import minidom from xml.parsers.expat import ExpatError @@ -139,7 +140,7 @@ class ObjectReport: return self.uuid def get_var_names(self): - return self.vars.keys() + return list(self.vars.keys()) def get_value(self, var_name, row): try: @@ -347,10 +348,8 @@ class RRDUpdates: paramstr = "&".join(["%s=%s" % (k, params[k]) for k in params]) print_debug("Calling http://localhost/rrd_updates?%s" % paramstr) - # this is better than urllib.urlopen() as it raises an Exception on http 401 'Unauthorised' error - # rather than drop into interactive mode - sock = urllib.URLopener().open("http://localhost/rrd_updates?%s" % paramstr) - xmlsource = sock.read() + sock = urllib.request.urlopen("http://localhost/rrd_updates?%s" % paramstr) + xmlsource = sock.read().decode("utf-8") sock.close() # Use sax rather than minidom and save Vvvast amounts of time and memory. @@ -405,8 +404,8 @@ def average(mylist): def get_percent_log_fs_usage(ignored): "Get the percent usage of the host filesystem for logs partition. Input list is ignored and should be empty" - fs_output = commands.getoutput("df /etc/passwd") - log_fs_output = commands.getoutput("df /var/log") + fs_output = subprocess.getoutput("df /etc/passwd") + log_fs_output = subprocess.getoutput("df /var/log") fs_output = " ".join(fs_output.splitlines()[1:]) log_fs_output = " ".join(log_fs_output.splitlines()[1:]) # Get the percent usage only when there is a separate logs partition @@ -421,7 +420,7 @@ def get_percent_log_fs_usage(ignored): def get_percent_fs_usage(ignored): "Get the percent usage of the host filesystem. 
Input list is ignored and should be empty" # this file is on the filesystem of interest in both OEM and Retail - output = commands.getoutput("df /etc/passwd") + output = subprocess.getoutput("df /etc/passwd") output = " ".join( output.splitlines()[1:] ) # remove header line and rewrap on single line @@ -755,13 +754,14 @@ class ObjectMonitor: for var in self.get_active_variables(): # find the subset of the params returned for this object that we need to consolidate into var - params_to_consolidate = filter(var.rrd_regex.match, params_in_obj_report) + params_to_consolidate = list( + filter(var.rrd_regex.match, params_in_obj_report) + ) for row in range(num_rows): # Get the values to consolidate - values_to_consolidate = map( - lambda param: obj_report.get_value(param, row), - params_to_consolidate, - ) + values_to_consolidate = [ + obj_report.get_value(param, row) for param in params_to_consolidate + ] # Consolidate them value = var.consolidation_fn(values_to_consolidate) # Pass result on to the variable object - this may result in an alarm being generated @@ -1191,7 +1191,7 @@ def update_all_xmlconfigs(session): for recs in (all_host_recs, all_vm_recs, all_sr_recs): all_otherconfigs.update( - [(recs[ref]["uuid"], recs[ref]["other_config"]) for ref in recs.keys()] + [(recs[ref]["uuid"], recs[ref]["other_config"]) for ref in recs] ) # rebuild dictionary mapping uuids to xmlconfigs @@ -1332,13 +1332,14 @@ def main(): vm_uuid_list = rrd_updates.get_uuid_list_by_objtype("vm") # Remove any monitors for VMs no longer listed in rrd_updates page - for uuid in vm_mon_lookup.keys(): + # We use .pop() inside the loop, use list(dict_var.keys()): + for uuid in list(vm_mon_lookup.keys()): if uuid not in vm_uuid_list: vm_mon_lookup.pop(uuid) # Create monitors for VMs that have just appeared in rrd_updates page for uuid in vm_uuid_list: - if uuid not in vm_mon_lookup.keys(): + if uuid not in vm_mon_lookup: vm_mon_lookup[uuid] = VMMonitor(uuid) else: # check if the config has 
changed, e.g. by XenCenter @@ -1372,12 +1373,13 @@ def main(): print_debug("sr_uuid_list = %s" % sr_uuid_list) # Remove monitors for SRs no longer listed in the rrd_updates page - for uuid in sr_mon_lookup.keys(): + # We use .pop() inside the loop, use list(dict_var.keys()): + for uuid in list(sr_mon_lookup.keys()): if uuid not in sr_uuid_list: sr_mon_lookup.pop(uuid) # Create monitors for SRs that have just appeared in rrd_updates page for uuid in sr_uuid_list: - if uuid not in sr_mon_lookup.keys(): + if uuid not in sr_mon_lookup: sr_mon_lookup[uuid] = SRMonitor(uuid) else: sr_mon_lookup[uuid].refresh_config() From a195f64225015e5650e50c323b5fa06eb185c509 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 14 Mar 2024 09:04:26 +0000 Subject: [PATCH 030/341] CP-47653: Fix pylint `line-too-long` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 167 +++++++++++++++++++++++++++++--------------- 1 file changed, 112 insertions(+), 55 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 1c2f3c3f95a..fc46a01e19a 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -29,6 +29,12 @@ # # The "cf" CGI param specfies the row. (All rows are returned if it's missing.) +# pylint: disable=too-many-lines, missing-function-docstring, missing-module-docstring +# pylint: disable=consider-using-f-string, missing-class-docstring, too-few-public-methods +# pylint: disable=too-many-instance-attributes, import-error, unnecessary-pass +# pylint: disable=too-many-branches, too-many-arguments, broad-exception-caught +# pylint: disable=no-else-break, no-else-return, invalid-name + import subprocess import gc @@ -48,7 +54,7 @@ import urllib.request from xml import sax # used to parse other-config:perfmon. 
Efficiency is less important than reliability here -from xml.dom import minidom +from xml.dom import minidom # pytype: disable=pyi-error from xml.parsers.expat import ExpatError import XenAPI @@ -254,7 +260,8 @@ class RRDContentHandler(sax.ContentHandler): or self.in_columns_tag or self.in_entry_tag or - # self.in_row_tag # ignore text under row tag, s are just for holding and nodes + # self.in_row_tag + # ignore text under row tag, s are just for holding and nodes self.in_t_tag or self.in_v_tag ): @@ -292,7 +299,8 @@ class RRDContentHandler(sax.ContentHandler): self.in_row_tag = False self.row += 1 elif name == "t": - # Extract start and end time from row data as it's more reliable than the values in the meta data + # Extract start and end time from row data + # as it's more reliable than the values in the meta data t = int(self.raw_text) # Last row corresponds to start time self.report.start_time = t @@ -378,7 +386,10 @@ class RRDUpdates: return None def get_uuid_list_by_objtype(self, objtype): - "Return a list of uuids corresonding to the objects of this type for which we have ObjectReports" + ''' + Return a list of uuids corresonding to the objects + of this type for which we have ObjectReports + ''' return [ objrep.uuid for objrep in self.report.obj_reports.values() @@ -403,7 +414,10 @@ def average(mylist): def get_percent_log_fs_usage(ignored): - "Get the percent usage of the host filesystem for logs partition. Input list is ignored and should be empty" + ''' + Get the percent usage of the host filesystem for logs partition. 
+ Input list is ignored and should be empty + ''' fs_output = subprocess.getoutput("df /etc/passwd") log_fs_output = subprocess.getoutput("df /var/log") fs_output = " ".join(fs_output.splitlines()[1:]) @@ -438,7 +452,7 @@ def get_percent_mem_usage(ignored): memdict = [m.split(":", 1) for m in memlist] memdict = dict( [ - (k.strip(), float(re.search("\d+", v.strip()).group(0))) + (k.strip(), float(re.search(r"\d+", v.strip()).group(0))) for (k, v) in memdict ] ) @@ -462,7 +476,10 @@ def get_percent_mem_usage(ignored): def get_percent_sr_usage(mylist): - """Get the percent usage of the SR. Input list should be exactly two items: [physical_utilisation, size]""" + """ + Get the percent usage of the SR. + Input list should be exactly two items: [physical_utilisation, size] + """ try: if len(mylist) != 2: raise Exception( @@ -480,13 +497,13 @@ class VariableConfig: """Object storing the configuration of a Variable Initialisation parameters: - xmldoc = dom object representing the nodes in the ObjectMonitor config strings. - See VMMonitor.__doc__ and HOSTMonitor.__doc__ - alarm_create_callback = - callback called by Variable.update() to create and send an alarm - get_default_variable_config = - a function that VariableConfig.__init__() uses to lookup default tag values - by variable name + xmldoc = dom object representing the nodes in the ObjectMonitor config strings. 
+ See VMMonitor.__doc__ and HOSTMonitor.__doc__ + alarm_create_callback = + callback called by Variable.update() to create and send an alarm + get_default_variable_config = + a function that VariableConfig.__init__() uses to lookup default tag values + by variable name """ def __init__(self, xmldoc, alarm_create_callback, get_default_variable_config): @@ -753,7 +770,8 @@ class ObjectMonitor: params_in_obj_report = obj_report.get_var_names() for var in self.get_active_variables(): - # find the subset of the params returned for this object that we need to consolidate into var + # find the subset of the params returned for this object + # that we need to consolidate into var params_to_consolidate = list( filter(var.rrd_regex.match, params_in_obj_report) ) @@ -764,7 +782,8 @@ class ObjectMonitor: ] # Consolidate them value = var.consolidation_fn(values_to_consolidate) - # Pass result on to the variable object - this may result in an alarm being generated + # Pass result on to the variable object + # This may result in an alarm being generated var.update(value, session) def alarm_create(self, var, session, message): @@ -783,7 +802,8 @@ class VMMonitor(ObjectMonitor): Configured by writing an xml string into an other-config key, e.g. xe vm-param-set uuid=$vmuuid other-config:perfmon=\ - '' + ' + ' Notes: - Multiple nodes allowed @@ -791,12 +811,19 @@ class VMMonitor(ObjectMonitor): * name: what to call the variable (no default) * alarm_priority: the priority of the messages generated (default '3') * alarm_trigger_level: level of value that triggers an alarm (no default) - * alarm_trigger_sense: 'high' if alarm_trigger_level is a max, otherwise 'low'. 
(default 'high') - * alarm_trigger_period: num seconds of 'bad' values before an alarm is sent (default '60') - * alarm_auto_inhibit_period: num seconds this alarm disabled after an alarm is sent (default '3600') - * consolidation_fn: how to combine variables from rrd_updates into one value - (default is 'average' for 'cpu_usage', 'get_percent_fs_usage' for 'fs_usage', 'get_percent_log_fs_usage' for 'log_fs_usage', 'get_percent_mem_usage' for 'mem_usage', & 'sum' for everything else) - * rrd_regex matches the names of variables from (xe vm-data-sources-list uuid=$vmuuid) used to compute value + * alarm_trigger_sense: + 'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high') + * alarm_trigger_period: + num seconds of 'bad' values before an alarm is sent (default '60') + * alarm_auto_inhibit_period: + num seconds this alarm disabled after an alarm is sent (default '3600') + * consolidation_fn: + how to combine variables from rrd_updates into one value + (default is 'average' for 'cpu_usage', 'get_percent_fs_usage' for 'fs_usage', + 'get_percent_log_fs_usage' for 'log_fs_usage', + 'get_percent_mem_usage' for 'mem_usage', & 'sum' for everything else) + * rrd_regex matches the names of variables + from (xe vm-data-sources-list uuid=$vmuuid) used to compute value (only has defaults for "cpu_usage", "network_usage", and "disk_usage") """ @@ -873,7 +900,8 @@ class SRMonitor(ObjectMonitor): Configured by writing an xml string into an other-config key, e.g. xe sr-param-set uuid=$vmuuid other-config:perfmon=\ - '' + ' + ' Notes: - Multiple nodes allowed @@ -881,12 +909,18 @@ class SRMonitor(ObjectMonitor): * name: what to call the variable (no default) * alarm_priority: the priority of the messages generated (default '3') * alarm_trigger_level: level of value that triggers an alarm (no default) - * alarm_trigger_sense: 'high' if alarm_trigger_level is a max, otherwise 'low'. 
(default 'high') - * alarm_trigger_period: num seconds of 'bad' values before an alarm is sent (default '60') - * alarm_auto_inhibit_period: num seconds this alarm disabled after an alarm is sent (default '3600') - * consolidation_fn: how to combine variables from rrd_updates into one value - (default is 'get_percent_sr_usage' for 'physical_utilistation', & 'sum' for everything else) - * rrd_regex matches the names of variables from (xe sr-data-sources-list uuid=$sruuid) used to compute value + * alarm_trigger_sense: + 'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high') + * alarm_trigger_period: + num seconds of 'bad' values before an alarm is sent (default '60') + * alarm_auto_inhibit_period: + num seconds this alarm disabled after an alarm is sent (default '3600') + * consolidation_fn: + how to combine variables from rrd_updates into one value + (default is 'get_percent_sr_usage' for 'physical_utilistation', + & 'sum' for everything else) + * rrd_regex matches the names of variables + from (xe sr-data-sources-list uuid=$sruuid) used to compute value (has default for "physical_utilistaion") """ @@ -906,7 +940,8 @@ class SRMonitor(ObjectMonitor): if variable_name == "physical_utilisation": return "physical_utilisation|size" elif variable_name == "sr_io_throughput_total_per_host": - return "_$_DUMMY__" # (these are to drive Host RRDs and so are handled by the HOSTMonitor) + # (these are to drive Host RRDs and so are handled by the HOSTMonitor) + return "_$_DUMMY__" else: raise XmlConfigException( "variable %s: no default rrd_regex - please specify one" @@ -940,7 +975,8 @@ class HOSTMonitor(ObjectMonitor): Configured by writing an xml string into an other-config key, e.g. 
xe host-param-set uuid=$hostuuid other-config:perfmon=\ - '' + ' + ' Notes: - Multiple nodes allowed @@ -948,22 +984,30 @@ class HOSTMonitor(ObjectMonitor): * name: what to call the variable (no default) * alarm_priority: the priority of the messages generated (default '3') * alarm_trigger_level: level of value that triggers an alarm (no default) - * alarm_trigger_sense: 'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high') - * alarm_trigger_period: num seconds of 'bad' values before an alarm is sent (default '60') - * alarm_auto_inhibit_period: num seconds this alarm disabled after an alarm is sent (default '3600') + * alarm_trigger_sense: + 'high' if alarm_trigger_level is a max, otherwise 'low'. (default 'high') + * alarm_trigger_period: + num seconds of 'bad' values before an alarm is sent (default '60') + * alarm_auto_inhibit_period: + num seconds this alarm disabled after an alarm is sent (default '3600') * consolidation_fn: how to combine variables from rrd_updates into one value - (default is 'average' for 'cpu_usage' & 'sum' for everything else) - * rrd_regex matches the names of variables from (xe host-data-source-list uuid=$hostuuid) used to compute value - (only has defaults for "cpu_usage", "network_usage", "memory_free_kib" and "sr_io_throughput_total_xxxxxxxx" + (default is 'average' for 'cpu_usage' & 'sum' for everything else) + * rrd_regex matches the names of variables + from (xe host-data-source-list uuid=$hostuuid) used to compute value + (only has defaults for "cpu_usage", "network_usage", "memory_free_kib" + and "sr_io_throughput_total_xxxxxxxx" where that last one ends with the first eight characters of the SR uuid) Also, as a special case for SR throughput, it is possible to configure a Host by writing xml into the other-config key of an SR connected to it, e.g. xe sr-param-set uuid=$sruuid other-config:perfmon=\ - ' + ' + - This only works for that one specific variable-name, and rrd_regex must not be specified. 
- Configuration done on the host directly (variable-name sr_io_throughput_total_xxxxxxxx) takes priority. + This only works for that one specific variable-name, + and rrd_regex must not be specified. + Configuration done on the host directly + (variable-name sr_io_throughput_total_xxxxxxxx) takes priority. """ def __init__(self, *args): @@ -1042,7 +1086,8 @@ class HOSTMonitor(ObjectMonitor): for sruuid in sruuids_by_hostuuid[self.uuid]: sr_xmlconfig = all_xmlconfigs[sruuid] # As an optimisation, if xml unchanged then do not re-parse. - # Otherwise we would create Variables which would turn out to be same as existing ones so we would ignore them. + # Otherwise we would create Variables which would + # turn out to be same as existing ones so we would ignore them. if ( sruuid in self.secondary_xmlconfigs and self.secondary_xmlconfigs[sruuid] == sr_xmlconfig @@ -1072,7 +1117,8 @@ class HOSTMonitor(ObjectMonitor): ) if main_changed or secondary_changed: - # Calculate which secondary variables are active, i.e. not overridden by ones configured on the host rather than the SR. + # Calculate which secondary variables are active, + # i.e. not overridden by ones configured on the host rather than the SR. main_names = {v.name for v in self.variables} for v in self.secondary_variables: v.set_active(v.name not in main_names) @@ -1106,7 +1152,8 @@ class HOSTMonitor(ObjectMonitor): continue # Do nothing unless the variable is meant for the host if len(vn.getElementsByTagName("rrd_regex")) > 0: log_err( - "Configuration error: rrd_regex must not be specified in config on SR meant for each host" + "Configuration error:" \ + "rrd_regex must not be specified in config on SR meant for each host" ) continue # perhaps another node is valid if found: @@ -1114,7 +1161,8 @@ class HOSTMonitor(ObjectMonitor): "Configuration error: duplicate variable %s on SR %s" % (name, sruuid) ) - # A host can only have one Variable from a given SR since we only accept one kind (one name). 
+ # A host can only have one Variable from a given SR + # since we only accept one kind (one name). break found = True name_override = "sr_io_throughput_total_%s" % sruuid[0:8] @@ -1228,7 +1276,8 @@ debug = False # rate to call update_all_xmlconfigs() config_update_period = 1800 -cmdsockname = "\0perfmon" # an af_unix socket name (the "\0" stops socket.bind() creating a fs node) +# an af_unix socket name (the "\0" stops socket.bind() creating a fs node) +cmdsockname = "\0perfmon" cmdmaxlen = 256 @@ -1322,7 +1371,8 @@ def main(): # Should we update all_xmlconfigs if time.time() >= next_config_update: print_debug("Updating all_xmlconfigs") - # yes - update all the xml configs: this generates a few LARGE xapi messages from the master + # yes - update all the xml configs: + # this generates a few LARGE xapi messages from the master update_all_xmlconfigs(session) # Set time when to do this next @@ -1384,7 +1434,8 @@ def main(): else: sr_mon_lookup[uuid].refresh_config() - # Go through each vm_mon and update it using the rrd_udpates - this may generate alarms + # Go through each vm_mon and update it using the rrd_udpates + # this may generate alarms for vm_mon in vm_mon_lookup.values(): vm_mon.process_rrd_updates(rrd_updates, session) @@ -1398,7 +1449,8 @@ def main(): except socket.error as e: if e.args[0] == 111: - # "Connection refused" - this happens when we try to restart session and *that* fails + # "Connection refused" + # this happens when we try to restart session and *that* fails time.sleep(2) pass @@ -1413,7 +1465,8 @@ def main(): # Error getting rrd_updates: 401=Unauthorised, 500=Internal - start new session pass elif e.args[0] == "socket error": - # This happens if we send messages or read other-config:perfmon after xapi is restarted + # This happens if we send messages or + # read other-config:perfmon after xapi is restarted pass else: # Don't know why we got this error - crash, die and look at logs later @@ -1494,16 +1547,20 @@ if __name__ == "__main__": 
except UsageException as e: # Print the usage log_err( - "usage: %s [-i -n -d -s -c -D ] \\\n" + "usage: %s [-i -n -d -s -c" \ + " -D ] \\\n" "\t[--interval= --numloops= --debug \\\n" "\t --rrdstep= --daemon]\n" "\t --config_update_period=\n" "\t --interval_percent_dither=\n" " interval:\tseconds between reads of http://localhost/rrd_updates?...\n" " loops:\tnumber of times to run before exiting\n" - " rrd_step:\tseconds between samples provided by rrd_updates. Valid values are 5 or 60\n" - " config_update_period:\tseconds between getting updates of all VM/host records from master\n" - " interval_percent_dither:\tmax percent dither in each loop - prevents stampede on master\n" + " rrd_step:\tseconds between samples provided by rrd_updates." \ + " Valid values are 5 or 60\n" + " config_update_period:\tseconds between getting updates" \ + " of all VM/host records from master\n" + " interval_percent_dither:\tmax percent dither in each loop" \ + " - prevents stampede on master\n" % (sys.argv[0]) ) rc = 1 @@ -1527,7 +1584,7 @@ if __name__ == "__main__": log_err(errmsg) except Exception as ignored: try: - errmsg = "\n".join([str(x) for x in e.details]) + errmsg = "\n".join([str(x) for x in e.details]) # pytype: disable=attribute-error # print the exception args nicely log_err(errmsg) except Exception as ignored: From 3d9546a867618800088324eff8eedd539735b0b6 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 14 Mar 2024 09:12:08 +0000 Subject: [PATCH 031/341] CP-47653: Fix pylint `redefined-outer-name` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index fc46a01e19a..365f9871238 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -1544,7 +1544,7 @@ if __name__ == "__main__": # run the main loop rc = main() - except UsageException as e: + except UsageException: # Print the usage log_err( "usage: %s [-i -n -d -s -c" \ 
@@ -1569,25 +1569,25 @@ if __name__ == "__main__": # we caught a signal which we have already logged pass - except Exception as e: + except Exception as exp: rc = 2 log_err("FATAL ERROR: perfmon will exit") - log_err("Exception is of class %s" % e.__class__) + log_err("Exception is of class %s" % exp.__class__) ex = sys.exc_info() err = traceback.format_exception(*ex) # Python built-in Exception has args, # but XenAPI.Failure has details instead. Sigh. try: - errmsg = "\n".join([str(x) for x in e.args]) + err_msg = "\n".join([str(x) for x in exp.args]) # print the exception args nicely - log_err(errmsg) - except Exception as ignored: + log_err(err_msg) + except Exception: try: - errmsg = "\n".join([str(x) for x in e.details]) # pytype: disable=attribute-error + err_msg = "\n".join([str(x) for x in exp.details]) # pytype: disable=attribute-error # print the exception args nicely - log_err(errmsg) - except Exception as ignored: + log_err(err_msg) + except Exception: pass # now log the traceback to syslog From 67d8ae16ad2dda8ba263301bbfa856281546a071 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 14 Mar 2024 09:24:41 +0000 Subject: [PATCH 032/341] CP-47653: Fix pylint `attribute-defined-outside-init` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 365f9871238..63005a70182 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -35,6 +35,8 @@ # pylint: disable=too-many-branches, too-many-arguments, broad-exception-caught # pylint: disable=no-else-break, no-else-return, invalid-name +# pylint: disable=global-statement + import subprocess import gc @@ -226,6 +228,10 @@ class RRDContentHandler(sax.ContentHandler): self.in_row_tag = False self.column_details = [] self.row = 0 + self.raw_text = "" + self.col = 0 + self.in_t_tag = False + self.in_v_tag = False def startElement(self, name, attrs): self.raw_text = "" From 
b34f3e55b0847540d87b0f1fb5d85d3d204748d9 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 15 Mar 2024 00:41:24 +0000 Subject: [PATCH 033/341] CP-47653: Fix pylint `unused-argument` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 63005a70182..08fe3329fc2 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -424,6 +424,7 @@ def get_percent_log_fs_usage(ignored): Get the percent usage of the host filesystem for logs partition. Input list is ignored and should be empty ''' + _ = ignored # unused: not sure if it'll be used later, passing pylint fs_output = subprocess.getoutput("df /etc/passwd") log_fs_output = subprocess.getoutput("df /var/log") fs_output = " ".join(fs_output.splitlines()[1:]) @@ -438,7 +439,12 @@ def get_percent_log_fs_usage(ignored): def get_percent_fs_usage(ignored): - "Get the percent usage of the host filesystem. Input list is ignored and should be empty" + ''' + Get the percent usage of the host filesystem. + Input list is ignored and should be empty + ''' + _ = ignored # unused: not sure if it'll be used later, passing pylint + # this file is on the filesystem of interest in both OEM and Retail output = subprocess.getoutput("df /etc/passwd") output = " ".join( @@ -450,7 +456,11 @@ def get_percent_fs_usage(ignored): def get_percent_mem_usage(ignored): - "Get the percent usage of Dom0 memory/swap. Input list is ignored and should be empty" + ''' + Get the percent usage of Dom0 memory/swap. 
+ Input list is ignored and should be empty + ''' + _ = ignored # unused: not sure if it'll be used later, passing pylint try: memfd = open("/proc/meminfo", "r") memlist = memfd.readlines() @@ -1514,6 +1524,7 @@ def main(): def sigterm_handler(sig, stack_frame): + _ = stack_frame # unused: not sure if it'll be used later, passing pylint log_err("Caught signal %d - exiting" % sig) sys.exit(1) From 8f3540af236999f2207cbfd3d0ea34a27f674b9e Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 06:57:30 +0000 Subject: [PATCH 034/341] CP-47653: Fix pylint `unspecified-encoding` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 08fe3329fc2..ba7040d4bc6 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -444,7 +444,7 @@ def get_percent_fs_usage(ignored): Input list is ignored and should be empty ''' _ = ignored # unused: not sure if it'll be used later, passing pylint - + # this file is on the filesystem of interest in both OEM and Retail output = subprocess.getoutput("df /etc/passwd") output = " ".join( @@ -462,7 +462,7 @@ def get_percent_mem_usage(ignored): ''' _ = ignored # unused: not sure if it'll be used later, passing pylint try: - memfd = open("/proc/meminfo", "r") + memfd = open("/proc/meminfo", "r", encoding="utf-8") memlist = memfd.readlines() memfd.close() memdict = [m.split(":", 1) for m in memlist] @@ -1541,20 +1541,21 @@ if __name__ == "__main__": if os.fork() != 0: sys.exit(0) os.setsid() - sys.stdout = open("/dev/null", "w") - sys.stdin = open("/dev/null", "r") + # For /dev/null, encoding is not needed + sys.stdout = open("/dev/null", "w") # pylint: disable=unspecified-encoding + sys.stdin = open("/dev/null", "r") # pylint: disable=unspecified-encoding sys.stderr = sys.stdout # Exit if perfmon already running if os.path.exists(pidfile): - pid = open(pidfile).read() + pid = open(pidfile, 
encoding="utf-8").read() if os.path.exists("/proc/%s" % pid): log_err("perfmon already running - exiting") sys.exit(3) try: # Write out pidfile - fd = open(pidfile, "w") + fd = open(pidfile, "w", encoding="utf-8") fd.write("%d" % os.getpid()) fd.close() From 3d72b3f88d3b9e7830f50416f6a5921725fd5378 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 07:02:32 +0000 Subject: [PATCH 035/341] CP-47653: Fix pylint `unused-variable` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index ba7040d4bc6..b844cf0ecaf 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -1270,7 +1270,7 @@ def update_all_xmlconfigs(session): # Rebuild another map sruuids_by_hostuuid.clear() - for sr, rec in all_sr_recs.items(): + for _, rec in all_sr_recs.items(): if "perfmon" in rec["other_config"]: sruuid = rec["uuid"] # If we hadn't done SR.get_all_records we would now do SR.get_PBDs. 
@@ -1306,7 +1306,7 @@ def main(): maxruns = None try: argv = sys.argv[1:] - opts, args = getopt.getopt( + opts, _ = getopt.getopt( argv, "i:n:ds:c:D:", [ @@ -1321,7 +1321,6 @@ def main(): except getopt.GetoptError: raise UsageException - configfname = None for opt, arg in opts: if opt == "-i" or opt == "--interval": interval = int(arg) From c5b9c6001f13b5c1106fbfe182133a8d427e3112 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 07:24:00 +0000 Subject: [PATCH 036/341] CP-47653: Fix pylint `consider-using-in` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index b844cf0ecaf..e9758fc93da 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -1322,19 +1322,19 @@ def main(): raise UsageException for opt, arg in opts: - if opt == "-i" or opt == "--interval": + if opt in ("-i", "--interval"): interval = int(arg) - elif opt == "-n" or opt == "--numloops": + elif opt in ("-n", "--numloops"): maxruns = int(arg) - elif opt == "-d" or opt == "--debug": + elif opt in ("-d", "--debug"): debug = True - elif opt == "-s" or opt == "--rrdstep": + elif opt in ("-s", "--rrdstep"): rrd_step = int(arg) - if rrd_step != 5 and rrd_step != 60: + if rrd_step not in (5, 60): raise UsageException - elif opt == "-c" or opt == "--config_update_period": + elif opt in ("-c", "--config_update_period"): config_update_period = int(arg) - elif opt == "-D" or opt == "--interval_percent_dither": + elif opt in ("-D", "--interval_percent_dither"): interval_percent_dither = int(arg) else: raise UsageException From 6f3f525ebe22845894eb0cf1437cad92a38607b6 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 07:49:30 +0000 Subject: [PATCH 037/341] CP-47653: Fix pylint `consider-using-with` warnings Signed-off-by: Stephen Cheng --- pyproject.toml | 3 +++ python3/bin/perfmon | 27 ++++++++++++++------------- 2 files changed, 17 
insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 32bd0ad84d2..addefd26e72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,9 @@ disable = [ "invalid-name", "import-error", "unnecessary-pass", + "unspecified-encoding", + "protected-access", + "no-member", # Some mutiple inheritance classes may have this issue ] [tool.mypy] diff --git a/python3/bin/perfmon b/python3/bin/perfmon index e9758fc93da..6c2b50b1957 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -362,9 +362,9 @@ class RRDUpdates: paramstr = "&".join(["%s=%s" % (k, params[k]) for k in params]) print_debug("Calling http://localhost/rrd_updates?%s" % paramstr) - sock = urllib.request.urlopen("http://localhost/rrd_updates?%s" % paramstr) - xmlsource = sock.read().decode("utf-8") - sock.close() + url = "http://localhost/rrd_updates?%s" % paramstr + with urllib.request.urlopen(url) as sock: + xmlsource = sock.read().decode("utf-8") # Use sax rather than minidom and save Vvvast amounts of time and memory. 
self.report.reset() @@ -462,9 +462,8 @@ def get_percent_mem_usage(ignored): ''' _ = ignored # unused: not sure if it'll be used later, passing pylint try: - memfd = open("/proc/meminfo", "r", encoding="utf-8") - memlist = memfd.readlines() - memfd.close() + with open("/proc/meminfo", "r", encoding="utf-8") as memfd: + memlist = memfd.readlines() memdict = [m.split(":", 1) for m in memlist] memdict = dict( [ @@ -1540,23 +1539,25 @@ if __name__ == "__main__": if os.fork() != 0: sys.exit(0) os.setsid() - # For /dev/null, encoding is not needed - sys.stdout = open("/dev/null", "w") # pylint: disable=unspecified-encoding - sys.stdin = open("/dev/null", "r") # pylint: disable=unspecified-encoding + # For /dev/null, `encoding` and `with` is not needed + # pylint: disable=unspecified-encoding, consider-using-with + sys.stdout = open("/dev/null", "w") + sys.stdin = open("/dev/null", "r") sys.stderr = sys.stdout # Exit if perfmon already running if os.path.exists(pidfile): - pid = open(pidfile, encoding="utf-8").read() + with open(pidfile, encoding="utf-8") as file: + pid = file.read() + if os.path.exists("/proc/%s" % pid): log_err("perfmon already running - exiting") sys.exit(3) try: # Write out pidfile - fd = open(pidfile, "w", encoding="utf-8") - fd.write("%d" % os.getpid()) - fd.close() + with open(pidfile, "w", encoding="utf-8") as fd: + fd.write("%d" % os.getpid()) # run the main loop rc = main() From 9942089f583165171e07b8bec9a6eecc21d150ee Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 07:54:59 +0000 Subject: [PATCH 038/341] CP-47653: Fix pylint `raise-missing-from` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 6c2b50b1957..64922e9371f 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -525,7 +525,7 @@ class VariableConfig: try: name = xmldoc.getElementsByTagName("name")[0].getAttribute("value") 
except IndexError: - raise XmlConfigException("variable missing 'name' tag") + raise XmlConfigException("variable missing 'name' tag") from None def get_value(tag): try: @@ -550,7 +550,7 @@ class VariableConfig: except: raise XmlConfigException( "variable %s: regex %s does not compile" % (name, rrd_regex) - ) + ) from None if consolidation_fn not in supported_consolidation_functions: raise XmlConfigException( @@ -565,7 +565,7 @@ class VariableConfig: raise XmlConfigException( "variable %s: alarm_trigger_period %s not an int" % (name, alarm_trigger_period) - ) + ) from None try: self.alarm_auto_inhibit_period = int(alarm_auto_inhibit_period) @@ -573,14 +573,14 @@ class VariableConfig: raise XmlConfigException( "variable %s: alarm_auto_inhibit_period %s not an int" % (name, alarm_auto_inhibit_period) - ) + ) from None try: trigger_level = float(alarm_trigger_level) except: raise XmlConfigException( "variable %s: alarm_trigger_level %s not a float" % (name, alarm_trigger_level) - ) + ) from None self.alarm_priority = alarm_priority @@ -1318,7 +1318,7 @@ def main(): ], ) except getopt.GetoptError: - raise UsageException + raise UsageException from None for opt, arg in opts: if opt in ("-i", "--interval"): From 64839b6b81d4d80c52592c3769ce9deffd1195e0 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 08:01:18 +0000 Subject: [PATCH 039/341] CP-47653: Fix pylint `bare-except` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 64922e9371f..3e76c637280 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -112,6 +112,9 @@ class UsageException(Exception): pass +class NotGetValueException(Exception): + pass + # Start a session with the master of a pool. # Note: when calling http://localhost/rrd_update we must pass the session # ID as a param. 
The host then uses this to verify our validity with @@ -153,7 +156,7 @@ class ObjectReport: def get_value(self, var_name, row): try: return (self.vars[var_name])[row] - except: + except NotGetValueException: return 0.0 def insert_value(self, var_name, index, value): @@ -388,7 +391,7 @@ class RRDUpdates: "Return an ObjectReport for the object with this uuid" try: return self.report.obj_reports[uuid] - except: + except NotGetValueException: return None def get_uuid_list_by_objtype(self, objtype): @@ -530,7 +533,7 @@ class VariableConfig: def get_value(tag): try: return xmldoc.getElementsByTagName(tag)[0].getAttribute("value") - except: + except NotGetValueException: return get_default_variable_config(name, tag) rrd_regex = get_value("rrd_regex") @@ -1415,7 +1418,7 @@ def main(): host_uuid = rrd_updates.get_uuid_list_by_objtype("host")[ 0 ] # should only ever be one of these - except: + except NotGetValueException: # list may be empty! host_uuid = None From 14045207b0718ba05a3076f9aaa80d855e3441a3 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 08:29:25 +0000 Subject: [PATCH 040/341] CP-47653: Disable some pylint checks There are some long functions in the code. So disable the below checks: too-many-locals / too-many-statements / too-many-return-statements Signed-off-by: Stephen Cheng --- pyproject.toml | 4 ++++ python3/bin/perfmon | 22 +++++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index addefd26e72..d171bf88358 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,6 +38,7 @@ ensure_newline_before_comments = false disable = [ "missing-function-docstring", "missing-module-docstring", + "missing-class-docstring", "consider-using-f-string", "too-many-branches", "too-many-arguments", @@ -50,6 +51,9 @@ disable = [ "unspecified-encoding", "protected-access", "no-member", # Some mutiple inheritance classes may have this issue + "too-many-locals", # Long functions. 
Need to refine the code + "too-many-statements", + "too-many-return-statements" ] [tool.mypy] diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 3e76c637280..c0adff37960 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -261,19 +261,19 @@ class RRDContentHandler(sax.ContentHandler): self.in_v_tag = True def characters(self, chars): - if ( - self.in_start_tag - or self.in_step_tag - or self.in_end_tag - or self.in_rows_tag - or self.in_columns_tag - or self.in_entry_tag - or + conditions = [ + self.in_start_tag, + self.in_step_tag, + self.in_end_tag, + self.in_rows_tag, + self.in_columns_tag, + self.in_entry_tag, + self.in_t_tag, + self.in_v_tag # self.in_row_tag # ignore text under row tag, s are just for holding and nodes - self.in_t_tag - or self.in_v_tag - ): + ] + if any(conditions): self.raw_text += chars def endElement(self, name): From 8a444f7ba6cf22997c9a6d84b73217ccc853dd35 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 08:50:26 +0000 Subject: [PATCH 041/341] CP-47653: Fix pylint dict related warnings use-dict-literal, dangerous-default-value, consider-using-dict-comprehension Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index c0adff37960..90063c35a67 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -95,8 +95,8 @@ def debug_mem(): objCount[name] = 1 output = [] - for name in objCount: - output.append("%s :%s" % (name, objCount[name])) + for name, cnt in objCount.items(): + output.append("%s :%s" % (name, cnt)) log_info("\n".join(output)) @@ -219,9 +219,12 @@ class RRDContentHandler(sax.ContentHandler): """ def __init__(self, report): - "report is saved and later updated by this object. report should contain defaults already" + ''' + report is saved and later updated by this object. 
+ report should contain defaults already + ''' + super().__init__() self.report = report - self.in_start_tag = False self.in_step_tag = False self.in_end_tag = False @@ -260,7 +263,7 @@ class RRDContentHandler(sax.ContentHandler): elif name == "v": self.in_v_tag = True - def characters(self, chars): + def characters(self, content): conditions = [ self.in_start_tag, self.in_step_tag, @@ -274,7 +277,7 @@ class RRDContentHandler(sax.ContentHandler): # ignore text under row tag, s are just for holding and nodes ] if any(conditions): - self.raw_text += chars + self.raw_text += content def endElement(self, name): if name == "start": @@ -344,7 +347,7 @@ class RRDUpdates: def __init__(self): # params are what get passed to the CGI executable in the URL - self.params = dict() + self.params = {} self.params["start"] = int(time.time()) - interval # interval seconds ago self.params["host"] = "true" # include data for host (as well as for VMs) self.params["sr_uuid"] = "all" # include data for all SRs attached to this host @@ -357,8 +360,10 @@ class RRDUpdates: def __repr__(self): return "" % str(self.params) - def refresh(self, session, override_params={}): + def refresh(self, session, override_params=None): "reread the rrd_updates over CGI and parse" + if override_params is None: + override_params = {} params = override_params params["session_id"] = session.id() params.update(self.params) @@ -467,13 +472,16 @@ def get_percent_mem_usage(ignored): try: with open("/proc/meminfo", "r", encoding="utf-8") as memfd: memlist = memfd.readlines() - memdict = [m.split(":", 1) for m in memlist] - memdict = dict( - [ - (k.strip(), float(re.search(r"\d+", v.strip()).group(0))) - for (k, v) in memdict - ] - ) + # memorylists is a list of lists, each list contains two parts: memtype and size + memorylists = [m.split(":", 1) for m in memlist] + memdict = {} + for item in memorylists: + memtype = item[0].strip() + size = item[1].strip() + match = re.search(r"\d+", size.strip()) + if match is 
None: + raise NotGetValueException + memdict[memtype] = float(match.group(0)) # We consider the sum of res memory and swap in use as the hard demand # of mem usage, it is bad if this number is beyond the physical mem, as # in such case swapping is obligatory rather than voluntary, hence From bec60a32b018fe59b8810a951c31b8a502c1cb5b Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 18 Mar 2024 10:29:02 +0000 Subject: [PATCH 042/341] CP-47653: Fix exception issues - In python3, socket.error, IOError are merged into OSError - ConnectionRefusedError is a subclass of OSError: - ConnectionRefusedError -> ConnectionError -> OSError - urllib.error.HTTPError is a subclass of OSError: - urllib.error.HTTPError <- urllib.error.URLError <- OSError - HTTPError doesn't have content in `args`. So we can't use `e.args[0]` Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 64 +++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 90063c35a67..992d6966654 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -115,6 +115,10 @@ class UsageException(Exception): class NotGetValueException(Exception): pass + +class IncorrectInputException(Exception): + pass + # Start a session with the master of a pool. # Note: when calling http://localhost/rrd_update we must pass the session # ID as a param. 
The host then uses this to verify our validity with @@ -508,7 +512,7 @@ def get_percent_sr_usage(mylist): """ try: if len(mylist) != 2: - raise Exception( + raise IncorrectInputException( "Incorrect number of values to consolidate: %d (exactly 2 values)" % len(mylist) ) @@ -568,6 +572,8 @@ class VariableConfig: "variable %s: consolidation function %s not supported" % (name, consolidation_fn) ) + # It's fine to use eval here + # pylint: disable=eval-used self.consolidation_fn = eval(consolidation_fn) try: @@ -702,14 +708,12 @@ class ObjectMonitor: try: self.__parse_xmlconfig() except XmlConfigException as e: - errmsg = "\n".join([str(x) for x in e.args]) log_err( - "%s %s config error: %s" % (self.monitortype, self.uuid, errmsg) + "%s %s config error: %s" % (self.monitortype, self.uuid, str(e)) ) except ExpatError as e: - errmsg = "\n".join([str(x) for x in e.args]) log_err( - "%s %s XML parse error: %s" % (self.monitortype, self.uuid, errmsg) + "%s %s XML parse error: %s" % (self.monitortype, self.uuid, str(e)) ) return True else: @@ -1130,16 +1134,14 @@ class HOSTMonitor(ObjectMonitor): try: self.__parse_secondary_xmlconfigs() except XmlConfigException as e: - errmsg = "\n".join([str(x) for x in e.args]) log_err( "%s %s secondary config error: %s" - % (self.monitortype, self.uuid, errmsg) + % (self.monitortype, self.uuid, str(e)) ) except ExpatError as e: - errmsg = "\n".join([str(x) for x in e.args]) log_err( "%s %s secondary XML parse error: %s" - % (self.monitortype, self.uuid, errmsg) + % (self.monitortype, self.uuid, str(e)) ) if main_changed or secondary_changed: @@ -1253,6 +1255,8 @@ def update_all_xmlconfigs(session): (SR, host or VM) to the xml config string in other-config:perfmon keys and update sruuids_by_hostuuid which together with all_xmlconfigs allows lookup of the other-config:perfmon xml of the SRs connected to a host""" + # `all_xmlconfigs` and `sruuids_by_hostuuid` are updated by clear() and update() + # pylint: 
disable=global-variable-not-assigned global all_xmlconfigs global sruuids_by_hostuuid @@ -1472,35 +1476,28 @@ def main(): for sr_mon in sr_mon_lookup.values(): sr_mon.process_rrd_updates(rrd_updates, session) - except socket.error as e: - if e.args[0] == 111: - # "Connection refused" - # this happens when we try to restart session and *that* fails - time.sleep(2) - pass - + except ConnectionRefusedError as e: + # "Connection refused[111]" + # this happens when we try to restart session and *that* fails + time.sleep(2) log_err( - "caught socket.error: (%s) - restarting XAPI session" - % " ".join([str(x) for x in e.args]) + "caught connection refused error: (%s) - restarting XAPI session" + % str(e) ) restart_session = True - - except IOError as e: - if e.args[0] == "http error" and e.args[1] in (401, 500): - # Error getting rrd_updates: 401=Unauthorised, 500=Internal - start new session - pass - elif e.args[0] == "socket error": - # This happens if we send messages or - # read other-config:perfmon after xapi is restarted - pass + except urllib.error.HTTPError as e: + if e.code in (401, 500): + # Error getting rrd_updates: 401=Unauthorised, 500=Internal + # start new session + log_err("caught http.error: (%s) - restarting XAPI session" % str(e)) + restart_session = True else: # Don't know why we got this error - crash, die and look at logs later raise - - log_err( - "caught IOError: (%s) - restarting XAPI session" - % " ".join([str(x) for x in e.args]) - ) + except OSError as e: + # This happens if we send messages or + # read other-config:perfmon after xapi is restarted + log_err("caught connection error: (%s) - restarting XAPI session" % str(e)) restart_session = True runs += 1 @@ -1608,9 +1605,8 @@ if __name__ == "__main__": # Python built-in Exception has args, # but XenAPI.Failure has details instead. Sigh. 
try: - err_msg = "\n".join([str(x) for x in exp.args]) # print the exception args nicely - log_err(err_msg) + log_err(str(exp)) except Exception: try: err_msg = "\n".join([str(x) for x in exp.details]) # pytype: disable=attribute-error From 2533874dbeb0a04487299856e0b9d314d9b861fc Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 19 Mar 2024 01:51:21 +0000 Subject: [PATCH 043/341] CP-47653: Move pylint disable statements to the specific lines. Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 992d6966654..f0b64c347e7 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -29,14 +29,7 @@ # # The "cf" CGI param specfies the row. (All rows are returned if it's missing.) -# pylint: disable=too-many-lines, missing-function-docstring, missing-module-docstring -# pylint: disable=consider-using-f-string, missing-class-docstring, too-few-public-methods -# pylint: disable=too-many-instance-attributes, import-error, unnecessary-pass -# pylint: disable=too-many-branches, too-many-arguments, broad-exception-caught -# pylint: disable=no-else-break, no-else-return, invalid-name - -# pylint: disable=global-statement - +# pylint: disable=too-many-lines, missing-class-docstring import subprocess import gc @@ -169,6 +162,7 @@ class ObjectReport: self.vars[var_name].insert(index, value) +# pylint: disable=too-few-public-methods class RRDReport: "This is just a data structure passed that is completed by RRDContentHandler" @@ -192,6 +186,7 @@ class RRDColumn: self.obj_report = obj_report +# pylint: disable=too-many-instance-attributes class RRDContentHandler(sax.ContentHandler): """Handles data in this format: @@ -523,6 +518,7 @@ def get_percent_sr_usage(mylist): return 0.0 +# pylint: disable=too-few-public-methods class VariableConfig: """Object storing the configuration of a Variable @@ -1310,7 +1306,7 @@ config_update_period = 1800 
cmdsockname = "\0perfmon" cmdmaxlen = 256 - +# pylint: disable=global-statement def main(): global interval global interval_percent_dither From 39d29feff13c4403a21df9dce8b97a51000e05aa Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 19 Mar 2024 02:01:11 +0000 Subject: [PATCH 044/341] CP-47653: Disable pytype `attribute-error` warnings Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index f0b64c347e7..22f597de601 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -614,6 +614,10 @@ class VariableState: def __init__(self): self.value = None + # Attributes `alarm_auto_inhibit_period` and `alarm_trigger_period` are defined + # in VariableConfig, and Class Varialbe multiple inherit from + # VariableConfig and VariableState + # pytype: disable=attribute-error self.timeof_last_alarm = time.time() - self.alarm_auto_inhibit_period self.trigger_down_counter = self.alarm_trigger_period @@ -1605,7 +1609,9 @@ if __name__ == "__main__": log_err(str(exp)) except Exception: try: - err_msg = "\n".join([str(x) for x in exp.details]) # pytype: disable=attribute-error + # As the comment above said, the XenAPI.Failure has `details` + # pytype: disable=attribute-error + err_msg = "\n".join([str(x) for x in exp.details]) # print the exception args nicely log_err(err_msg) except Exception: From 36fbf20d13f26dee37e6cb88d2af2d1d2a47ea29 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 19 Mar 2024 06:40:13 +0000 Subject: [PATCH 045/341] CP-47653: Apply pytype to the new path Signed-off-by: Stephen Cheng --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d171bf88358..739597edef4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,7 +91,6 @@ discard_messages_matching = [ "No Node.TEXT_NODE in module xml.dom.minidom, referenced from 'xml.dom.expatbuilder'" ] expected_to_fail 
= [ - "scripts/perfmon", # Need 2to3 -w and maybe a few other minor updates: "scripts/hatests", "scripts/backup-sr-metadata.py", @@ -111,7 +110,6 @@ expected_to_fail = [ [tool.pytype] inputs = [ - "scripts/perfmon", "scripts/static-vdis", "scripts/Makefile", "scripts/generate-iscsi-iqn", @@ -129,6 +127,7 @@ inputs = [ # Python 3 "python3/bin/hfx_filename", + "python3/bin/perfmon", "python3/bin/*.py", "python3/libexec/*.py", From 9c32647aa462f3dd3a533cb7c3c93462db4c35c8 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 19 Mar 2024 10:13:00 +0000 Subject: [PATCH 046/341] CP-47653: Use general exception for not getting data. Previously, for fixing the pylint, I used a specific exception for not getting data. But by testing, it didn't catch the index error. Not sure if there are any other exceptions. So just keep the original logic, use the general exception. Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 22f597de601..30ba6a235cf 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -30,6 +30,7 @@ # The "cf" CGI param specfies the row. (All rows are returned if it's missing.) 
# pylint: disable=too-many-lines, missing-class-docstring +# pytype: disable=attribute-error import subprocess import gc @@ -105,10 +106,6 @@ class UsageException(Exception): pass -class NotGetValueException(Exception): - pass - - class IncorrectInputException(Exception): pass @@ -153,7 +150,7 @@ class ObjectReport: def get_value(self, var_name, row): try: return (self.vars[var_name])[row] - except NotGetValueException: + except Exception: return 0.0 def insert_value(self, var_name, index, value): @@ -395,7 +392,7 @@ class RRDUpdates: "Return an ObjectReport for the object with this uuid" try: return self.report.obj_reports[uuid] - except NotGetValueException: + except Exception: return None def get_uuid_list_by_objtype(self, objtype): @@ -473,14 +470,12 @@ def get_percent_mem_usage(ignored): memlist = memfd.readlines() # memorylists is a list of lists, each list contains two parts: memtype and size memorylists = [m.split(":", 1) for m in memlist] - memdict = {} - for item in memorylists: - memtype = item[0].strip() - size = item[1].strip() - match = re.search(r"\d+", size.strip()) - if match is None: - raise NotGetValueException - memdict[memtype] = float(match.group(0)) + memdict = { + # pytype complained that No attribute 'group' on None + # Let Exception catch the `not matched` issue and return 0.0 + k.strip(): float(re.search(r"\d+", v.strip()).group(0)) + for (k, v) in memorylists + } # We consider the sum of res memory and swap in use as the hard demand # of mem usage, it is bad if this number is beyond the physical mem, as # in such case swapping is obligatory rather than voluntary, hence @@ -541,7 +536,7 @@ class VariableConfig: def get_value(tag): try: return xmldoc.getElementsByTagName(tag)[0].getAttribute("value") - except NotGetValueException: + except Exception: return get_default_variable_config(name, tag) rrd_regex = get_value("rrd_regex") @@ -617,7 +612,6 @@ class VariableState: # Attributes `alarm_auto_inhibit_period` and `alarm_trigger_period` 
are defined # in VariableConfig, and Class Varialbe multiple inherit from # VariableConfig and VariableState - # pytype: disable=attribute-error self.timeof_last_alarm = time.time() - self.alarm_auto_inhibit_period self.trigger_down_counter = self.alarm_trigger_period @@ -1430,7 +1424,7 @@ def main(): host_uuid = rrd_updates.get_uuid_list_by_objtype("host")[ 0 ] # should only ever be one of these - except NotGetValueException: + except Exception: # list may be empty! host_uuid = None @@ -1609,8 +1603,6 @@ if __name__ == "__main__": log_err(str(exp)) except Exception: try: - # As the comment above said, the XenAPI.Failure has `details` - # pytype: disable=attribute-error err_msg = "\n".join([str(x) for x in exp.details]) # print the exception args nicely log_err(err_msg) From 8576a334c42cc98d95d6227412c9500c30d69887 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 21 Mar 2024 02:19:16 +0000 Subject: [PATCH 047/341] CP-47653: Fix a minor bug where the variable "in_v_tag" was incorrectly written as "in_t_tag." 
Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 30ba6a235cf..6b6dd34ef94 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -334,7 +334,7 @@ class RRDContentHandler(sax.ContentHandler): # Update position in row self.col += 1 - self.in_t_tag = False + self.in_v_tag = False # An object of this class should persist the lifetime of the program From 9ff99ae2db59c4e8693f6b20884c7c0887fe84c1 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 22 Mar 2024 06:44:17 +0000 Subject: [PATCH 048/341] CP-47653: Add unit tests for perfmon Signed-off-by: Stephen Cheng --- .github/workflows/main.yml | 4 +- python3/bin/perfmon | 17 +- python3/unittest/test_nbd_client_manager.py | 3 - python3/unittest/test_perfmon.py | 600 ++++++++++++++++++++ 4 files changed, 612 insertions(+), 12 deletions(-) create mode 100644 python3/unittest/test_perfmon.py diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a51f40e91e6..9d55ec60312 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -72,8 +72,8 @@ jobs: if: ${{ matrix.python-version != '2.7' }} run: > pytest - --cov=python3/unittest - python3/unittest -vv -rA + --cov=python3/ + python3/unittest python3/tests -vv -rA --junitxml=.git/pytest${{matrix.python-version}}.xml --cov-report term-missing --cov-report xml:.git/coverage${{matrix.python-version}}.xml diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 6b6dd34ef94..669182f5ec4 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -56,7 +56,7 @@ from xml.parsers.expat import ExpatError import XenAPI -def print_debug(string): +def print_debug(string): # pragma: no cover if debug: print("DEBUG:", string, file=sys.stderr) syslog.syslog(syslog.LOG_USER | syslog.LOG_INFO, "PERFMON(DEBUG): %s" % string) @@ -68,13 +68,13 @@ def log_err(string): pass -def log_info(string): +def log_info(string): # pragma: no 
cover print(string, file=sys.stderr) syslog.syslog(syslog.LOG_INFO | syslog.LOG_INFO, "PERFMON: %s" % string) pass -def debug_mem(): +def debug_mem(): # pragma: no cover objCount = {} gc.collect() objList = gc.get_objects() @@ -114,7 +114,7 @@ class IncorrectInputException(Exception): # ID as a param. The host then uses this to verify our validity with # the master before responding. # If the verification fails we should get a 401 response -class XapiSession(XenAPI.Session): +class XapiSession(XenAPI.Session): # pragma: no cover """Object that represents a XenAPI session with the pool master One of these is needed to refresh a VMMonitor or HOSTMonitor config, or to refresh an RRDUpdates object @@ -420,6 +420,9 @@ supported_consolidation_functions = [ def average(mylist): + if not mylist: + log_err("Error in average, no input data, return 0.0 instead") + return 0.0 return sum(mylist) / float(len(mylist)) @@ -1305,7 +1308,7 @@ cmdsockname = "\0perfmon" cmdmaxlen = 256 # pylint: disable=global-statement -def main(): +def main(): # pragma: no cover global interval global interval_percent_dither global rrd_step @@ -1523,7 +1526,7 @@ def main(): return 0 -def sigterm_handler(sig, stack_frame): +def sigterm_handler(sig, stack_frame): # pragma: no cover _ = stack_frame # unused: not sure if it'll be used later, passing pylint log_err("Caught signal %d - exiting" % sig) sys.exit(1) @@ -1531,7 +1534,7 @@ def sigterm_handler(sig, stack_frame): pidfile = "/var/run/perfmon.pid" -if __name__ == "__main__": +if __name__ == "__main__": # pragma: no cover # setup signal handler to print out notice when killed signal.signal(signal.SIGTERM, sigterm_handler) diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/unittest/test_nbd_client_manager.py index 8ead3cfa580..48ca22be297 100644 --- a/python3/unittest/test_nbd_client_manager.py +++ b/python3/unittest/test_nbd_client_manager.py @@ -11,9 +11,6 @@ nbd_client_manager = get_module("nbd_client_manager", 
"../libexec/nbd_client_manager.py") -# mock modules to avoid dependencies -sys.modules["XenAPI"] = MagicMock() - @patch('subprocess.Popen') class TestCallFunction(unittest.TestCase): diff --git a/python3/unittest/test_perfmon.py b/python3/unittest/test_perfmon.py new file mode 100644 index 00000000000..a61e66aeaa4 --- /dev/null +++ b/python3/unittest/test_perfmon.py @@ -0,0 +1,600 @@ +#!/usr/bin/env python3 +""" +This module provides unittest for perfmon +""" + +import sys +import math +import unittest +from mock import MagicMock, patch, mock_open +from import_file import get_module + +# mock modules to avoid dependencies +sys.modules["XenAPI"] = MagicMock() + +perfmon = get_module("perfmon", "../bin/perfmon") + + +@patch("subprocess.getoutput") +class TestGetFsUsage(unittest.TestCase): + '''Test get_percent_log_fs_usage and get_percent_fs_usage''' + def mock_subprocess_getoutput(self, cmd): + df_etc_passwd = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda1 18402132 2244748 15213668 13% / + """ + df_var_log = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda5 4054752 59820 3785220 2% /var/log + """ + if cmd == "df /etc/passwd": + return df_etc_passwd + if cmd == "df /var/log": + return df_var_log + return None + + def mock_subprocess_getoutput_same_file_system(self, cmd): + df_etc_passwd = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda5 18402132 2244748 15213668 13% / + """ + df_var_log = r"""Filesystem 1K-blocks Used Available Use% Mounted on + /dev/sda5 4054752 59820 3785220 2% /var/log + """ + if cmd == "df /etc/passwd": + return df_etc_passwd + if cmd == "df /var/log": + return df_var_log + return None + + def test_get_percent_log_fs_usage(self, mock_getoutput): + """Assert that get_percent_log_fs_usage returns as expected""" + mock_getoutput.side_effect = self.mock_subprocess_getoutput + + expected_percentage = 0.02 + test_percentage = perfmon.get_percent_log_fs_usage(None) + 
self.assertAlmostEqual(test_percentage, expected_percentage, 7) + + def test_get_percent_log_fs_usage_same_file_system(self, mock_getoutput): + """Test where /etc/passwd and /var/log are in the same filesystem""" + mock_getoutput.side_effect = self.mock_subprocess_getoutput_same_file_system + + test_percentage = perfmon.get_percent_log_fs_usage(None) + self.assertTrue(math.isnan(test_percentage)) + + def test_get_percent_fs_usage(self, mock_getoutput): + """Assert that get_percent_fs_usage returns as expected""" + mock_getoutput.side_effect = self.mock_subprocess_getoutput + + expected_percentage = 0.13 + test_percentage = perfmon.get_percent_fs_usage(None) + self.assertAlmostEqual(test_percentage, expected_percentage, 7) + + +class TestGetMemUsage(unittest.TestCase): + '''Test get_percent_mem_usage ''' + + meminfo = '''MemTotal: 2580464 kB + MemFree: 1511024 kB + MemAvailable: 2210924 kB + Buffers: 95948 kB + Cached: 518164 kB + SwapCached: 0 kB + Active: 424468 kB + Inactive: 390016 kB + Active(anon): 207944 kB + Inactive(anon): 8740 kB + Active(file): 216524 kB + Inactive(file): 381276 kB + Unevictable: 13620 kB + Mlocked: 13620 kB + SwapTotal: 1048572 kB + SwapFree: 1048572 kB''' + @patch("builtins.open", new_callable=mock_open, read_data=meminfo) + def test_get_percent_mem_usage(self, _): + self.assertAlmostEqual(perfmon.get_percent_mem_usage([]), 0.17645198692948244) + + @patch('builtins.open', side_effect=Exception) + def test_get_percent_mem_usage_exception(self, _): + self.assertEqual(perfmon.get_percent_mem_usage(None), 0.0) + + +class TestGetPercentSRUsage(unittest.TestCase): + '''Test get_percent_sr_usage ''' + + def test_get_percent_sr_usage_correct_input(self): + input_list = [100, 200] + expected_result = 0.5 + self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list), + expected_result) + + def test_get_percent_sr_usage_incorrect_input(self): + input_list = [100] # Incorrect input, expecting two values + expected_result = 0.0 + 
self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list), + expected_result) + + def test_get_percent_sr_usage_zero_division(self): + input_list = [0, 200] # Physical utilization is 0 + expected_result = 0.0 + self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list), + expected_result) + + def test_get_percent_sr_usage_exception_handling(self): + input_list = ["invalid", 200] # Invalid input, should raise an exception + expected_result = 0.0 # Since exception is handled, function should return 0.0 + self.assertAlmostEqual(perfmon.get_percent_sr_usage(input_list), + expected_result) + + +class TestAverage(unittest.TestCase): + '''Test get_percent_sr_usage ''' + def test_average_empty_list(self): + result = perfmon.average([]) + self.assertEqual(result, 0.0) + + def test_average_single_element_list(self): + result = perfmon.average([5]) + self.assertEqual(result, 5.0) + + def test_average_positive_numbers(self): + result = perfmon.average([1, 2, 3, 4, 5]) + self.assertEqual(result, 3.0) + + +class TestUpdateAllXMLConfigs(unittest.TestCase): + '''Test update_all_xmlconfigs''' + def test_update_all_xmlconfigs(self): + + perfmon.all_xmlconfigs = {} + perfmon.sruuids_by_hostuuid = {} + + host_uuid = '28a574e4-bf57-4476-a83d-72cba7578d23' + vm_uuid = '2cf37285-57bc-4633-a24f-0c6c825dda66' + sr_uuid = '0e7f8fb3-1ba2-4bce-9889-48812273a316' + perfmon_config = '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' + + mock_session = MagicMock() + mock_session.xenapi.host.get_all_records.return_value = { + 'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a':{ + 'uuid': host_uuid, + 'name_label': 'xrtuk-11-43', + 'name_description': 'Default install', + 'memory_overhead': '631816192', + 'software_version': { + 'product_version': '8.4.0', 'product_version_text': '8', + 'product_version_text_short': '8', 'platform_name': 'XCP', + 'platform_version': '3.4.0', 'product_brand': 'XenServer', + 'build_number': 'stream', 'git_id': '0', 'hostname': 
'localhost', + 'date': '20240229T15:07:05Z', 'dbv': '2024.0229', + 'is_preview_release': 'false', 'xapi': '24.11', + 'xapi_build': '24.11.0', 'xen': '4.17.3-4', + 'linux': '4.19.0+1', 'xencenter_min': '2.21', + 'xencenter_max': '2.21', 'network_backend': 'openvswitch', + 'db_schema': '5.775'}, + 'other_config': { + 'iscsi_iqn': 'iqn.2024-03.xenrtcloud:339cd227', + 'agent_start_time': '1710910331.', + 'boot_time': '1710910266.', + 'perfmon': perfmon_config} + } + } + mock_session.xenapi.VM.get_all_records.return_value = { + 'OpaqueRef:fffc65bb-b909-03b2-c20a-8277434a4495': { + 'uuid': vm_uuid, + 'other_config': { + 'storage_driver_domain': 'OpaqueRef:11de3275-b5e4-a56c-a295', + 'is_system_domain': 'true', 'perfmon': perfmon_config + } + } + } + mock_session.xenapi.SR.get_all_records.return_value = { + 'OpaqueRef:fffc65bb-b909-03b2-c20a-8277434a4495': { + 'uuid': sr_uuid, + 'other_config': { + 'storage_driver_domain': 'OpaqueRef:11de3275-b5e4-a56c-a295', + 'is_system_domain': 'true', 'perfmon': perfmon_config + }, + 'PBDs': ['pbd1', 'pbd2'] + } + } + # One SR is connected to two hosts + mock_session.xenapi.PBD.get_host.return_value = \ + 'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a' + + + # Call the function to test + perfmon.update_all_xmlconfigs(mock_session) + + # Check that all_xmlconfigs and sruuids_by_hostuuid were updated correctly + expect_xmlconfigs = { + host_uuid: perfmon_config, + vm_uuid: perfmon_config, + sr_uuid: perfmon_config + } + self.assertEqual(perfmon.all_xmlconfigs, expect_xmlconfigs) + print(perfmon.sruuids_by_hostuuid) + self.assertEqual(perfmon.sruuids_by_hostuuid, {host_uuid: {sr_uuid}}) + +class TestObjectReport(unittest.TestCase): + '''Test Class ObjectReport ''' + def setUp(self): + # Create an instance of ObjectReport for testing + self.obj_report = perfmon.ObjectReport(objtype="vm", + uuid="e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e") + + def test_get_uuid(self): + self.assertEqual(self.obj_report.get_uuid(), + 
"e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e") + + def test_get_var_names(self): + # Initially, there are no variables, so the list should be empty + self.assertEqual(self.obj_report.get_var_names(), []) + + # Insert a variable and check if it appears in the list + self.obj_report.insert_value("cpu_usage", 0, 0.5) + self.assertEqual(self.obj_report.get_var_names(), ["cpu_usage"]) + + def test_get_value(self): + # Insert a value for a variable and retrieve it + self.obj_report.insert_value("cpu_usage", 0, 0.5) + self.assertEqual(self.obj_report.get_value("cpu_usage", 0), 0.5) + + # Trying to retrieve a value for a non-existing variable should return 0.0 + self.assertEqual(self.obj_report.get_value("memory_usage", 0), 0.0) + + def test_insert_value(self): + # Insert a value for a variable and check if it's stored correctly + self.obj_report.insert_value("cpu_usage", 0, 0.5) + self.assertEqual(self.obj_report.vars["cpu_usage"], [0.5]) + + # Insert another value for the same variable and check if it's stored correctly + self.obj_report.insert_value("cpu_usage", 1, 0.6) + self.assertEqual(self.obj_report.vars["cpu_usage"], [0.5, 0.6]) + + +@patch("perfmon.XapiSession") +@patch("perfmon.get_percent_fs_usage") +@patch("perfmon.get_percent_log_fs_usage") +@patch("perfmon.get_percent_mem_usage") +class TestVMMonitor(unittest.TestCase): + '''Test getting VM performance data from VMMonitor''' + + def test_process_rrd_updates(self, mock_get_percent_mem_usage, + mock_get_percent_log_fs_usage, + mock_get_percent_fs_usage, + mock_xapisession): + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + perfmon.all_xmlconfigs = {'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e': + ''' + + + + + + + + + + + + '''} + monitor = perfmon.VMMonitor(uuid) + rrd_updates = perfmon.RRDUpdates() + obj_report = perfmon.ObjectReport("vm", uuid) + obj_report.vars = { + 'cpu0': [0.0063071, 0.0048038, 0.0045862, 0.0048865, 0.0048923], + 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], + 'cpu2': [0.0088599, 
0.0078701, 0.0058573, 0.0063993, 0.0056833], + 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], + 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 0.0050993], + 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], + 'cpu6': [0.006235, 0.0041764, 0.0048101, 0.0053798, 0.0050934], + 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], + 'memory': [2785000000.0, 2785000000.0, 2785000000.0, + 2785000000.0, 2785000000.0] + } + rrd_updates.report.obj_reports[uuid] = obj_report + rrd_updates.report.rows = 1 + session = mock_xapisession() + + mock_get_percent_fs_usage.return_value = 0.12 + mock_get_percent_mem_usage.return_value = 0.17380 + mock_get_percent_log_fs_usage.return_value = float("NaN") + monitor.process_rrd_updates(rrd_updates, session) + mock_get_percent_fs_usage.assert_called() + mock_get_percent_log_fs_usage.assert_called() + mock_get_percent_mem_usage.assert_called() + self.assertAlmostEqual(monitor.variables[0].value, 0.12) + self.assertAlmostEqual(monitor.variables[1].value, 0.17380) + self.assertTrue(math.isnan(monitor.variables[2].value)) + + +class TestHOSTMonitor(unittest.TestCase): + '''Test getting HOST performance data from HOSTMonitor''' + + @patch("perfmon.XapiSession") + def test_process_rrd_updates(self, mock_xapisession): + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + perfmon.all_xmlconfigs = {'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e': + ''' + '''} + monitor = perfmon.HOSTMonitor(uuid) + rrd_updates = perfmon.RRDUpdates() + obj_report = perfmon.ObjectReport("vm", uuid) + obj_report.vars = { + 'cpu0': [0.0063071, 0.0048038, 0.0045862, 0.0048865, 0.0048923], + 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], + 'cpu2': [0.0088599, 0.0078701, 0.0058573, 0.0063993, 0.0056833], + 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], + 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 0.0050993], + 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], + 'cpu6': [0.006235, 
0.0041764, 0.0048101, 0.0053798, 0.0050934], + 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], + 'memory': [2785000000.0, 2785000000.0, 2785000000.0, + 2785000000.0, 2785000000.0] + } + rrd_updates.report.obj_reports[uuid] = obj_report + rrd_updates.report.rows = 5 + session = mock_xapisession() + + monitor.process_rrd_updates(rrd_updates, session) + # Average of cpu0-cpu7 (row 5) + # [0.0048923, 0.0053645, 0.0056833, 0.0048769, + # 0.0050993, 0.0062017, 0.0050934, 0.0049544] + self.assertAlmostEqual(monitor.variables[0].value, 0.005270725) + + def test_refresh_config(self): + perfmon.all_xmlconfigs = {} + perfmon.sruuids_by_hostuuid = {} + + host_uuid = '28a574e4-bf57-4476-a83d-72cba7578d23' + sr_uuid = '0e7f8fb3-1ba2-4bce-9889-48812273a316' + perfmon_config = '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' \ + '' + + mock_session = MagicMock() + mock_session.xenapi.host.get_all_records.return_value = { + 'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a':{ + 'uuid': host_uuid, + 'other_config': { + 'iscsi_iqn': 'iqn.2024-03.xenrtcloud:339cd227', + 'agent_start_time': '1710910331.', + 'boot_time': '1710910266.', + 'perfmon': perfmon_config} + } + } + mock_session.xenapi.SR.get_all_records.return_value = { + 'OpaqueRef:fffc65bb-b909-03b2-c20a-8277434a4495': { + 'uuid': sr_uuid, + 'other_config': { + 'storage_driver_domain': 'OpaqueRef:11de3275-b5e4-a56c-a295', + 'is_system_domain': 'true', 'perfmon': perfmon_config + }, + 'PBDs': ['pbd1', 'pbd2'] + } + } + mock_session.xenapi.PBD.get_host.return_value = \ + 'OpaqueRef:8be06dc8-bed5-4d81-d030-937eca11094a' + perfmon.update_all_xmlconfigs(mock_session) + monitor = perfmon.HOSTMonitor(host_uuid) + monitor.refresh_config() + expected_sruuids = {sr_uuid} + self.assertEqual(set(monitor.secondary_xmlconfigs), expected_sruuids) + + +@patch("perfmon.XapiSession") +class TestSRMonitor(unittest.TestCase): + '''Test getting SR performance data from SrMonitor''' + def 
test_process_rrd_updates(self, mock_xapisession): + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + perfmon.all_xmlconfigs = {'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e': + ''' + '''} + monitor = perfmon.SRMonitor(uuid) + rrd_updates = perfmon.RRDUpdates() + obj_report = perfmon.ObjectReport("vm", uuid) + obj_report.vars = { + 'size': [100, 200, 300, 400, 500], + 'physical_utilisation': [2000, 3000, 4000, 5000, 6000], + } + rrd_updates.report.obj_reports[uuid] = obj_report + rrd_updates.report.rows = 5 + session = mock_xapisession() + + monitor.process_rrd_updates(rrd_updates, session) + # get_percent_sr_usage([500, 6000]) + self.assertAlmostEqual(monitor.variables[0].value, 0.08333333333333333) + + +class TestRRDUpdates(unittest.TestCase): + '''Test Class RRDUpdates and RRDContentHandler''' + + @patch('time.time', return_value=100000) + def test_init(self, _): + rrd_updates = perfmon.RRDUpdates() + + expected_start = 100000 - perfmon.interval + self.assertEqual(rrd_updates.params['start'], expected_start) + self.assertEqual(rrd_updates.params["host"], "true") + self.assertEqual(rrd_updates.params["sr_uuid"], "all") + self.assertEqual(rrd_updates.params["cf"], "AVERAGE") + self.assertEqual(rrd_updates.params["interval"], str(perfmon.rrd_step)) + + + @patch('time.time', return_value=100000) + @patch("perfmon.XapiSession") + @patch('urllib.request.urlopen') + def test_refresh(self, mock_urlopen, mock_xapisession, _): + rrd_updates = perfmon.RRDUpdates() + + # mock_session + mock_session = mock_xapisession() + mock_session.id.return_value = "mocked_session_id" + + # mock xmlsource + xml = r''' + + 1213578000 + 3600 + 1213617600 + 2 + 12 + + AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1 + AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0 + AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory + MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1 + MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0 + MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory + 
MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1 + MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0 + MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory + LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1 + LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0 + LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory + + + + + 1213617600 # The first row corresponds to end time + 0.0 + 0.0282 + 209715200.0000 + 0.0 + 0.0201 + 209715200.0000 + 0.0 + 0.0445 + 209715200.0000 + 0.0 + 0.0243 + 209715200.0000 + + + 1213616600 #The last row corresponds to Start time + 0.0 + 0.0282 + 209715200.0000 + 0.0 + 0.0201 + 209715200.0000 + 0.0 + 0.0445 + 209715200.0000 + 0.0 + 0.0243 + 209715200.0000 + + +''' + xml_rrdupdates = xml.encode(encoding='utf-8') + cm = MagicMock() + cm.read.return_value = xml_rrdupdates + cm.__enter__.return_value = cm + mock_urlopen.return_value = cm + rrd_updates.refresh(mock_session) + + # Test __repr__ + print(rrd_updates) + + self.assertEqual(rrd_updates.get_num_rows(), 2) + self.assertIsNotNone( + rrd_updates.get_obj_report_by_uuid("ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3") + ) + self.assertIsNone( + rrd_updates.get_obj_report_by_uuid("123345") + ) + self.assertEqual(rrd_updates.get_uuid_list_by_objtype("vm"), + ["ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3"]) + + +class TestVariable(unittest.TestCase): + '''Test Class Varible''' + + def test_set_active(self): + # Construct varible node for VaribleConfig + # Not used, just for input + xmlconfig = b'' \ + b'' + xmldoc = perfmon.minidom.parseString(xmlconfig) + variable_nodes = xmldoc.getElementsByTagName("variable") + node = variable_nodes[0] + + # Construct function alarm_create and mock_get_default_varible_config + # Not used, just for input + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + monitor = perfmon.VMMonitor(uuid) + var = perfmon.Variable(node, monitor.alarm_create, + monitor.get_default_variable_config) + + # Call set_active with active=True + var.set_active(True) + self.assertTrue(var.active) + + # Call 
set_active with active=False + var.set_active(False) + self.assertFalse(var.active) + + @patch("perfmon.XapiSession") + def test_update(self, mock_xapisession): + xmlconfig = b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' \ + b'' + xmldoc = perfmon.minidom.parseString(xmlconfig) + variable_nodes = xmldoc.getElementsByTagName("variable") + node = variable_nodes[0] + + uuid = 'e1ae3f5d-4c8b-4575-bbb8-2af7e8a2c31e' + monitor = perfmon.VMMonitor(uuid) + var = perfmon.Variable(node, monitor.alarm_create, + monitor.get_default_variable_config) + + session = mock_xapisession() + + # Trigger alarm + var.trigger_down_counter = 50 + var.update(0.95,session) + self.assertEqual(var.trigger_down_counter, 60) + + # Not trigger alarm - time isn't up + var.trigger_down_counter = 100 + var.update(0.95,session) + self.assertEqual(var.trigger_down_counter, 40) + + # Not trigger alarm - level good + var.trigger_down_counter = 50 + var.update(0.8,session) + self.assertEqual(var.trigger_down_counter, 60) + +if __name__ == '__main__': + unittest.main() From 99391dd71dd8db0e5baaaed9aeb91134bef97ca0 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 25 Mar 2024 05:11:12 +0000 Subject: [PATCH 049/341] CI configuration change for python unit test coverage Signed-off-by: Stephen Cheng --- .codecov.yml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 47ef46ac090..79d69aa0b14 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -177,10 +177,6 @@ coverage: # threshold: 20% - # Checks each Python version separately: - python-3.11: - flags: ["python3.11"] - # # Project limits # -------------- @@ -235,12 +231,12 @@ component_management: - type: project # `auto` will use the coverage from the base commit (pull request base # or parent commit) coverage to compare against. 
- target: auto + target: 48 threshold: 2% - type: patch - target: auto - threshold: 10% + target: 80 + threshold: 5% individual_components: From 6acbf1aab83fc3fb44bf17d58c55d43313222f1e Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 26 Mar 2024 01:09:18 +0000 Subject: [PATCH 050/341] CP-47653: Explicit exception chaining Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 669182f5ec4..4d1244856a7 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -533,8 +533,8 @@ class VariableConfig: def __init__(self, xmldoc, alarm_create_callback, get_default_variable_config): try: name = xmldoc.getElementsByTagName("name")[0].getAttribute("value") - except IndexError: - raise XmlConfigException("variable missing 'name' tag") from None + except IndexError as e: + raise XmlConfigException("variable missing 'name' tag") from e def get_value(tag): try: @@ -556,10 +556,10 @@ class VariableConfig: self.name = name try: self.rrd_regex = re.compile("^%s$" % rrd_regex) - except: + except Exception as e: raise XmlConfigException( "variable %s: regex %s does not compile" % (name, rrd_regex) - ) from None + ) from e if consolidation_fn not in supported_consolidation_functions: raise XmlConfigException( @@ -572,26 +572,26 @@ class VariableConfig: try: self.alarm_trigger_period = int(alarm_trigger_period) - except: + except Exception as e: raise XmlConfigException( "variable %s: alarm_trigger_period %s not an int" % (name, alarm_trigger_period) - ) from None + ) from e try: self.alarm_auto_inhibit_period = int(alarm_auto_inhibit_period) - except: + except Exception as e: raise XmlConfigException( "variable %s: alarm_auto_inhibit_period %s not an int" % (name, alarm_auto_inhibit_period) - ) from None + ) from e try: trigger_level = float(alarm_trigger_level) - except: + except Exception as e: raise XmlConfigException( "variable 
%s: alarm_trigger_level %s not a float" % (name, alarm_trigger_level) - ) from None + ) from e self.alarm_priority = alarm_priority @@ -1329,8 +1329,8 @@ def main(): # pragma: no cover "interval_percent_dither=", ], ) - except getopt.GetoptError: - raise UsageException from None + except getopt.GetoptError as e: + raise UsageException from e for opt, arg in opts: if opt in ("-i", "--interval"): @@ -1599,8 +1599,7 @@ if __name__ == "__main__": # pragma: no cover ex = sys.exc_info() err = traceback.format_exception(*ex) - # Python built-in Exception has args, - # but XenAPI.Failure has details instead. Sigh. + # XenAPI.Failure has `details`. try: # print the exception args nicely log_err(str(exp)) From 88c9dfcb09c28bd4b46d09e3c794dfa7339a323a Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 27 Mar 2024 08:44:31 +0000 Subject: [PATCH 051/341] CP-47653: Fix a `bytes-str` bug Also add the fix for scripts/plugins/perfmon Signed-off-by: Stephen Cheng --- python3/bin/perfmon | 21 ++++++++------------- scripts/plugins/perfmon | 5 +++-- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index 4d1244856a7..e5c6741b2d3 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -358,9 +358,9 @@ class RRDUpdates: def refresh(self, session, override_params=None): "reread the rrd_updates over CGI and parse" - if override_params is None: - override_params = {} - params = override_params + params = {} + if override_params is not None: + params = override_params params["session_id"] = session.id() params.update(self.params) paramstr = "&".join(["%s=%s" % (k, params[k]) for k in params]) @@ -426,12 +426,11 @@ def average(mylist): return sum(mylist) / float(len(mylist)) -def get_percent_log_fs_usage(ignored): +def get_percent_log_fs_usage(_): ''' Get the percent usage of the host filesystem for logs partition. 
Input list is ignored and should be empty ''' - _ = ignored # unused: not sure if it'll be used later, passing pylint fs_output = subprocess.getoutput("df /etc/passwd") log_fs_output = subprocess.getoutput("df /var/log") fs_output = " ".join(fs_output.splitlines()[1:]) @@ -445,13 +444,11 @@ def get_percent_log_fs_usage(ignored): return float("NaN") -def get_percent_fs_usage(ignored): +def get_percent_fs_usage(_): ''' Get the percent usage of the host filesystem. Input list is ignored and should be empty ''' - _ = ignored # unused: not sure if it'll be used later, passing pylint - # this file is on the filesystem of interest in both OEM and Retail output = subprocess.getoutput("df /etc/passwd") output = " ".join( @@ -462,12 +459,11 @@ def get_percent_fs_usage(ignored): return float(percentage[0:-1]) / 100.0 -def get_percent_mem_usage(ignored): +def get_percent_mem_usage(_): ''' Get the percent usage of Dom0 memory/swap. Input list is ignored and should be empty ''' - _ = ignored # unused: not sure if it'll be used later, passing pylint try: with open("/proc/meminfo", "r", encoding="utf-8") as memfd: memlist = memfd.readlines() @@ -1509,7 +1505,7 @@ def main(): # pragma: no cover timeout = rand(interval, interval + dither) cmdsock.settimeout(timeout) try: - cmd = cmdsock.recv(cmdmaxlen) + cmd = cmdsock.recv(cmdmaxlen).decode() except socket.timeout: pass else: @@ -1526,8 +1522,7 @@ def main(): # pragma: no cover return 0 -def sigterm_handler(sig, stack_frame): # pragma: no cover - _ = stack_frame # unused: not sure if it'll be used later, passing pylint +def sigterm_handler(sig, _): # pragma: no cover log_err("Caught signal %d - exiting" % sig) sys.exit(1) diff --git a/scripts/plugins/perfmon b/scripts/plugins/perfmon index 2186c938938..e3dc2452691 100644 --- a/scripts/plugins/perfmon +++ b/scripts/plugins/perfmon @@ -14,16 +14,17 @@ def send_perfmon_cmd(cmd): "Return True for success, or ERROR_%d: otherwise" if len(cmd) >= cmdmaxlen: return "ERROR_0: command too 
long" + cmd_bytes = cmd.encode() try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - rc = sock.sendto(cmd, cmdsockname) + rc = sock.sendto(cmd_bytes, cmdsockname) except socket.error as e: err, msg = e.args return "ERROR_%d: %s" % (err, msg) except Exception: return "ERROR_1: unknown error" - return str(rc == len(cmd)) + return str(rc == len(cmd_bytes)) def stop(session, args): From 1ade72d198f6d89ccd9ac3bcde70403667bd4335 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 17 Apr 2024 12:00:00 +0200 Subject: [PATCH 052/341] CA-390883: Move usb_reset.py to python3, test mount() in a namespace Use a rootless container (like unshare --map-root-user --mount) to test the correct calling convention for mount()/umount(). Use a context manager test fixture to temporarily mock module imports: This allows to mock global modules only temporary for importing the testee without affecting other tests. - Add a sufficient testcase to test usb_reset.py: mount() and umount() without mocking the system or library calls in any way. - Use python3/tests a Python tests package to allow for non-deprecated relative imports: Absolute imports within a module are deprecated. 
Signed-off-by: Bernhard Kaindl --- python3/Makefile | 1 + {scripts => python3/libexec}/usb_reset.py | 0 python3/tests/conftest.py | 10 +++ python3/tests/import_helper.py | 70 +++++++++++++++++++ python3/tests/rootless_container.py | 83 +++++++++++++++++++++++ python3/tests/test_usb_reset_mount.py | 14 ++++ scripts/Makefile | 1 - 7 files changed, 178 insertions(+), 1 deletion(-) rename {scripts => python3/libexec}/usb_reset.py (100%) create mode 100644 python3/tests/conftest.py create mode 100644 python3/tests/import_helper.py create mode 100644 python3/tests/rootless_container.py create mode 100644 python3/tests/test_usb_reset_mount.py diff --git a/python3/Makefile b/python3/Makefile index 26e2bdfa943..1384df9284c 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -13,6 +13,7 @@ install: $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ + $(IPROG) libexec/usb_reset.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) diff --git a/scripts/usb_reset.py b/python3/libexec/usb_reset.py similarity index 100% rename from scripts/usb_reset.py rename to python3/libexec/usb_reset.py diff --git a/python3/tests/conftest.py b/python3/tests/conftest.py new file mode 100644 index 00000000000..d0a4777e1f1 --- /dev/null +++ b/python3/tests/conftest.py @@ -0,0 +1,10 @@ +"""scripts/unit_test/conftest.py: Common pytest module for shared pytest fixtures""" +import pytest + +from .rootless_container import enter_private_mount_namespace + + +@pytest.fixture(scope="session") +def private_mount_namespace(): + """Enter a private mount namespace that allows us to test mount and unmount""" + return enter_private_mount_namespace() diff --git a/python3/tests/import_helper.py b/python3/tests/import_helper.py new file mode 100644 index 00000000000..87541c9b6cf --- /dev/null +++ b/python3/tests/import_helper.py @@ -0,0 +1,70 @@ +"""helpers for unit-testing functions in scripts without permanent 
global mocks""" +import os +import sys +from contextlib import contextmanager +from types import ModuleType + +from typing import Generator +from mock import Mock + + +@contextmanager +def mocked_modules(*module_names): # type:(str) -> Generator[None, None, None] + """Context manager that temporarily mocks the specified modules. + + :param module_names: Variable number of names of the modules to be mocked. + :yields: None + + During the context, the specified modules are added to the sys.modules + dictionary as instances of the ModuleType class. + This effectively mocks the modules, allowing them to be imported and used + within the context. After the context, the mocked modules are removed + from the sys.modules dictionary. + + Example usage: + ```python + with mocked_modules("module1", "module2"): + # Code that uses the mocked modules + ``` + """ + for module_name in module_names: + sys.modules[module_name] = Mock() + yield + for module_name in module_names: + sys.modules.pop(module_name) + + +def import_file_as_module(relative_script_path): # type:(str) -> ModuleType + """Import a Python script without the .py extension as a python module. + + :param relative_script_path (str): The relative path of the script to import. + :returns module: The imported module. + :raises: AssertionError: If the spec or loader is not available. + + Note: + - This function uses different methods depending on the Python version. + - For Python 2, it uses the imp module. + - For Python 3, it uses the importlib module. + + Example: + - import_script_as_module('scripts/mail-alarm') # Returns the imported module. 
+ """ + script_path = os.path.dirname(__file__) + "/../../" + relative_script_path + module_name = os.path.basename(script_path.replace(".py", "")) + + # For Python 3.11+: Import Python script without the .py extension: + # https://gist.github.com/bernhardkaindl/1aaa04ea925fdc36c40d031491957fd3: + # pylint: disable-next=import-outside-toplevel + from importlib import ( # pylint: disable=no-name-in-module + machinery, + util, + ) + + loader = machinery.SourceFileLoader(module_name, script_path) + spec = util.spec_from_loader(module_name, loader) + assert spec + assert spec.loader + module = util.module_from_spec(spec) + sys.modules[module_name] = module + spec.loader.exec_module(module) + return module diff --git a/python3/tests/rootless_container.py b/python3/tests/rootless_container.py new file mode 100644 index 00000000000..30ff364ace3 --- /dev/null +++ b/python3/tests/rootless_container.py @@ -0,0 +1,83 @@ +"""rootless_container.py: Create a rootless container on any Linux and GitHub CI""" +import ctypes +import os + +# Unshare the user namespace, so that the calling process is moved into a new +# user namespace which is not shared with any previously existing process. +# Needed so that the current user id can be mapped to 0 for getting a new +# mount namespace. 
+CLONE_NEWUSER = 0x10000000 +# Unshare the mount namespace, so that the calling process has a private copy +# of its root directory namespace which is not shared with any other process: +CLONE_NEWNS = 0x00020000 +# Flags for mount(2): +MS_BIND = 4096 +MS_REC = 16384 +MS_PRIVATE = 1 << 18 + + +def unshare(flags): # type:(int) -> None + """Wrapper for the library call to unshare Linux kernel namespaces""" + lib = ctypes.CDLL(None, use_errno=True) + lib.unshare.argtypes = [ctypes.c_int] + rc = lib.unshare(flags) + if rc != 0: # pragma: no cover + errno = ctypes.get_errno() + raise OSError(errno, os.strerror(errno), flags) + + +def mount(source="none", target="", fs="", flags=0, options=""): + # type:(str, str, str, int, str) -> None + """Wrapper for the library call mount(). Supports Python2.7 and Python3.x""" + lib = ctypes.CDLL(None, use_errno=True) + lib.mount.argtypes = ( + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.c_ulong, + ctypes.c_char_p, + ) + result = lib.mount( + source.encode(), target.encode(), fs.encode(), flags, options.encode() + ) + if result < 0: # pragma: no cover + errno = ctypes.get_errno() + raise OSError( + errno, + "mount " + target + " (" + options + "): " + os.strerror(errno), + ) + + +def umount(target): # type:(str) -> None + """Wrapper for the Linux umount system call, supports Python2.7 and Python3.x""" + lib = ctypes.CDLL(None, use_errno=True) + result = lib.umount(ctypes.c_char_p(target.encode())) + if result < 0: # pragma: no cover + errno = ctypes.get_errno() + raise OSError(errno, "umount " + target + ": " + os.strerror(errno)) + + +def enter_private_mount_namespace(): + """Enter a private mount and user namespace with the user and simulate uid 0 + + Some code like mount() requires to be run as root. The container simulates + root-like privileges and a new mount namespace that allows mount() in it. 
+ + Implements the equivalent of `/usr/bin/unshare --map-root-user --mount` + """ + + # Read the actual user and group ids before entering the new user namespace: + real_uid = os.getuid() + real_gid = os.getgid() + unshare(CLONE_NEWUSER | CLONE_NEWNS) + # Setup user map to map the user id to behave like uid 0: + with open("/proc/self/uid_map", "wb") as proc_self_user_map: + proc_self_user_map.write(b"0 %d 1" % real_uid) + with open("/proc/self/setgroups", "wb") as proc_self_set_groups: + proc_self_set_groups.write(b"deny") + # Setup group map for the user's gid to behave like gid 0: + with open("/proc/self/gid_map", "wb") as proc_self_group_map: + proc_self_group_map.write(b"0 %d 1" % real_gid) + # Private root mount in the mount namespace top support mounting a private tmpfs: + mount(target="/", flags=MS_REC | MS_PRIVATE) + return True diff --git a/python3/tests/test_usb_reset_mount.py b/python3/tests/test_usb_reset_mount.py new file mode 100644 index 00000000000..9cfe3b5b804 --- /dev/null +++ b/python3/tests/test_usb_reset_mount.py @@ -0,0 +1,14 @@ +"""scripts/unit_test/test_usb_reset_mount.py: Test usb_reset.mount and .umount""" +from __future__ import print_function + +from .import_helper import import_file_as_module, mocked_modules + + +def test_usb_reset_mount_umount(private_mount_namespace): + """Test usb_reset.mount and .umount""" + assert private_mount_namespace + with mocked_modules("xcp", "xcp.logger"): + usb_reset = import_file_as_module("python3/libexec/usb_reset.py") + usb_reset.log.error = print + usb_reset.mount(source="tmpfs", target="/tmp", fs="tmpfs") + usb_reset.umount("/tmp") diff --git a/scripts/Makefile b/scripts/Makefile index 6a850199ba6..38459115396 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -108,7 +108,6 @@ install: $(IPROG) pam.d-xapi $(DESTDIR)/etc/pam.d/xapi $(IPROG) upload-wrapper logs-download $(DESTDIR)$(LIBEXECDIR) $(IDATA) usb-policy.conf $(DESTDIR)$(ETCXENDIR) - $(IPROG) usb_reset.py $(DESTDIR)$(LIBEXECDIR) mkdir -p 
$(DESTDIR)$(OPTDIR)/packages/iso #omg XXX $(IPROG) xapi-rolling-upgrade-miami $(DESTDIR)$(LIBEXECDIR)/xapi-rolling-upgrade $(IPROG) set-hostname $(DESTDIR)$(LIBEXECDIR) From 2bf2a44cc29740e2bcc50d9625fbb35eaf9ad93e Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 24 Apr 2024 12:00:00 +0200 Subject: [PATCH 053/341] python3/unittest: Replace import_file with import_helper Bernhard Kaindl: Resolved a conflict with one of my other upcoming changes which removes the need to a pytype ignore by using a cast() instead, which fixes pyright/pylance/vscode. Also keep import_helper in python3/tests as the the name "unittest" sould likely avoided as it clashes with the unittest module. The tests in python3/tests are written for pytest instead and not all of them will classify as unit tests, so using the name unittest for them would also be a misnomer. Using the name tests is shorter and more generic, and we can also use the separation beween tests and unittest to differentiate between modern pytest tests and legact unittest-based tests which should possibly be better migrated to pytest at some point for the benefits that pytest gives: For example with pytest, you can use use just assert and you do not need to use self.assert...(), because pytest implements the proper assert matching diagnostics. In the long run, the classic unittest tests should no longer be used. 
Co-authored-by: Bernhard Kaindl Signed-off-by: Pau Ruiz Safont --- python3/__init__.py | 0 python3/tests/test_observer.py | 4 +-- python3/tests/test_usb_reset_mount.py | 2 +- python3/unittest/import_file.py | 25 --------------- python3/unittest/test_hfx_filename.py | 8 ++--- python3/unittest/test_nbd_client_manager.py | 5 ++- python3/unittest/test_perfmon.py | 34 ++++++++++----------- python3/unittest/test_usb_scan.py | 11 ++++--- 8 files changed, 33 insertions(+), 56 deletions(-) create mode 100644 python3/__init__.py delete mode 100644 python3/unittest/import_file.py diff --git a/python3/__init__.py b/python3/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/python3/tests/test_observer.py b/python3/tests/test_observer.py index 53944d97ca9..9464efbc3e3 100644 --- a/python3/tests/test_observer.py +++ b/python3/tests/test_observer.py @@ -10,7 +10,7 @@ with patch("os.listdir") as mock_listdir: # Prevent it finding an observer.conf mock_listdir.return_value = [] - from packages import observer + from python3.packages import observer # mock modules to avoid dependencies sys.modules["opentelemetry"] = MagicMock() @@ -29,7 +29,7 @@ OTEL_RESOURCE_ATTRIBUTES='service.name=sm' """ TEST_OBSERVER_CONF = "test-observer.conf" -OBSERVER_OPEN = "packages.observer.open" +OBSERVER_OPEN = "python3.packages.observer.open" # pylint: disable=missing-function-docstring,protected-access diff --git a/python3/tests/test_usb_reset_mount.py b/python3/tests/test_usb_reset_mount.py index 9cfe3b5b804..e9d432742f6 100644 --- a/python3/tests/test_usb_reset_mount.py +++ b/python3/tests/test_usb_reset_mount.py @@ -1,7 +1,7 @@ """scripts/unit_test/test_usb_reset_mount.py: Test usb_reset.mount and .umount""" from __future__ import print_function -from .import_helper import import_file_as_module, mocked_modules +from python3.tests.import_helper import import_file_as_module, mocked_modules def test_usb_reset_mount_umount(private_mount_namespace): diff --git 
a/python3/unittest/import_file.py b/python3/unittest/import_file.py deleted file mode 100644 index 581f8f4b401..00000000000 --- a/python3/unittest/import_file.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -This file is used for importing a non-".py" file as a module in unit test. -It never runs directly, so no shebang and no main() -""" -import sys -import os -from importlib import machinery, util - -def import_from_file(module_name, file_path): - """Import a file as a module""" - loader = machinery.SourceFileLoader(module_name, file_path) - spec = util.spec_from_loader(module_name, loader) - assert spec - assert spec.loader - module = util.module_from_spec(spec) - # Probably a good idea to add manually imported module stored in sys.modules - sys.modules[module_name] = module - spec.loader.exec_module(module) - return module - -def get_module(module_name, file_path): - """get the module from a file""" - testdir = os.path.dirname(__file__) - print(testdir) - return import_from_file(module_name, "{}/{}".format(testdir, file_path)) diff --git a/python3/unittest/test_hfx_filename.py b/python3/unittest/test_hfx_filename.py index 0fc4f5abba3..ca3618f38c2 100644 --- a/python3/unittest/test_hfx_filename.py +++ b/python3/unittest/test_hfx_filename.py @@ -7,12 +7,12 @@ import sys import unittest from mock import MagicMock, patch, call -from import_file import get_module +from python3.tests.import_helper import import_file_as_module # mock modules to avoid dependencies sys.modules["XenAPI"] = MagicMock() -hfx_filename = get_module("hfx_filename", "../bin/hfx_filename") +hfx_filename = import_file_as_module("python3/bin/hfx_filename") @patch("socket.socket") @@ -82,7 +82,7 @@ def test_rpc_international_character(self, mock_socket): def test_db_get_uuid(self, mock_socket): """ - Tests db_get_uuid + Tests db_get_uuid """ mock_connected_socket = MagicMock() mock_socket.return_value = mock_connected_socket @@ -100,7 +100,7 @@ def test_db_get_uuid(self, mock_socket): def 
test_read_field(self, mock_socket): """ - Tests read_field + Tests read_field """ mock_connected_socket = MagicMock() mock_socket.return_value = mock_connected_socket diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/unittest/test_nbd_client_manager.py index 48ca22be297..224a1c3e2ea 100644 --- a/python3/unittest/test_nbd_client_manager.py +++ b/python3/unittest/test_nbd_client_manager.py @@ -3,13 +3,12 @@ This module provides unittest for nbd_client_manager.py """ -import sys import unittest import subprocess from mock import MagicMock, patch, mock_open, call -from import_file import get_module +from python3.tests.import_helper import import_file_as_module -nbd_client_manager = get_module("nbd_client_manager", "../libexec/nbd_client_manager.py") +nbd_client_manager = import_file_as_module("python3/libexec/nbd_client_manager.py") @patch('subprocess.Popen') class TestCallFunction(unittest.TestCase): diff --git a/python3/unittest/test_perfmon.py b/python3/unittest/test_perfmon.py index a61e66aeaa4..9d638f4fab4 100644 --- a/python3/unittest/test_perfmon.py +++ b/python3/unittest/test_perfmon.py @@ -7,12 +7,12 @@ import math import unittest from mock import MagicMock, patch, mock_open -from import_file import get_module +from python3.tests.import_helper import import_file_as_module # mock modules to avoid dependencies sys.modules["XenAPI"] = MagicMock() -perfmon = get_module("perfmon", "../bin/perfmon") +perfmon = import_file_as_module("python3/bin/perfmon") @patch("subprocess.getoutput") @@ -292,13 +292,13 @@ def test_process_rrd_updates(self, mock_get_percent_mem_usage, obj_report = perfmon.ObjectReport("vm", uuid) obj_report.vars = { 'cpu0': [0.0063071, 0.0048038, 0.0045862, 0.0048865, 0.0048923], - 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], - 'cpu2': [0.0088599, 0.0078701, 0.0058573, 0.0063993, 0.0056833], - 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], - 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 
0.0050993], - 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], - 'cpu6': [0.006235, 0.0041764, 0.0048101, 0.0053798, 0.0050934], - 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], + 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], + 'cpu2': [0.0088599, 0.0078701, 0.0058573, 0.0063993, 0.0056833], + 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], + 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 0.0050993], + 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], + 'cpu6': [0.006235, 0.0041764, 0.0048101, 0.0053798, 0.0050934], + 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], 'memory': [2785000000.0, 2785000000.0, 2785000000.0, 2785000000.0, 2785000000.0] } @@ -332,13 +332,13 @@ def test_process_rrd_updates(self, mock_xapisession): obj_report = perfmon.ObjectReport("vm", uuid) obj_report.vars = { 'cpu0': [0.0063071, 0.0048038, 0.0045862, 0.0048865, 0.0048923], - 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], - 'cpu2': [0.0088599, 0.0078701, 0.0058573, 0.0063993, 0.0056833], - 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], - 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 0.0050993], - 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], - 'cpu6': [0.006235, 0.0041764, 0.0048101, 0.0053798, 0.0050934], - 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], + 'cpu1': [0.0067629, 0.0055811, 0.0058988, 0.0058809, 0.0053645], + 'cpu2': [0.0088599, 0.0078701, 0.0058573, 0.0063993, 0.0056833], + 'cpu3': [0.0085826, 0.0056874, 0.005697, 0.0061155, 0.0048769], + 'cpu4': [0.0051265, 0.0045452, 0.0046137, 0.0066399, 0.0050993], + 'cpu5': [0.0062369, 0.0053982, 0.0056624, 0.00606, 0.0062017], + 'cpu6': [0.006235, 0.0041764, 0.0048101, 0.0053798, 0.0050934], + 'cpu7': [0.0050709, 0.005482, 0.0058926, 0.0052934, 0.0049544], 'memory': [2785000000.0, 2785000000.0, 2785000000.0, 2785000000.0, 2785000000.0] } @@ -415,7 +415,7 @@ def 
test_process_rrd_updates(self, mock_xapisession): obj_report = perfmon.ObjectReport("vm", uuid) obj_report.vars = { 'size': [100, 200, 300, 400, 500], - 'physical_utilisation': [2000, 3000, 4000, 5000, 6000], + 'physical_utilisation': [2000, 3000, 4000, 5000, 6000], } rrd_updates.report.obj_reports[uuid] = obj_report rrd_updates.report.rows = 5 diff --git a/python3/unittest/test_usb_scan.py b/python3/unittest/test_usb_scan.py index d87f9b12b27..e5ee00a253a 100644 --- a/python3/unittest/test_usb_scan.py +++ b/python3/unittest/test_usb_scan.py @@ -8,9 +8,11 @@ import tempfile import unittest from collections.abc import Mapping +from typing import cast import mock -from import_file import get_module + +from python3.tests.import_helper import import_file_as_module sys.modules["xcp"] = mock.Mock() sys.modules["xcp.logger"] = mock.Mock() @@ -107,9 +109,10 @@ def verify_usb_common( self, moc_devices, moc_interfaces, moc_results, - path="./scripts/usb-policy.conf" + # Use relative path to allow tests to be started in subdirectories + path = os.path.dirname(__file__) + "/../../scripts/usb-policy.conf" ): - usb_scan = get_module("usb_scan", "../libexec/usb_scan.py") + usb_scan = import_file_as_module("python3/libexec/usb_scan.py") mock_setup(usb_scan, moc_devices, moc_interfaces, path) @@ -134,7 +137,7 @@ def verify_usb_exit( # cm.exception.code is int type whose format # looks like "duplicated tag'vid' found, # malformed line ALLOW:vid=056a vid=0314 class=03" - self.assertIn(msg, cm.exception.code) # pytype: disable=wrong-arg-types + self.assertIn(msg, cast(str, cm.exception.code)) # code is a str def test_usb_dongle(self): devices = [ From fdc9c0812cb33bce30dbcec2039879cc44bd27b3 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Sun, 28 Apr 2024 02:06:09 +0100 Subject: [PATCH 054/341] Merge python3/unittest into python3/tests Signed-off-by: Stephen Cheng --- .codecov.yml | 4 ++-- .github/workflows/main.yml | 2 +- python3/{unittest => tests}/test_hfx_filename.py | 0 
python3/{unittest => tests}/test_nbd_client_manager.py | 0 python3/{unittest => tests}/test_perfmon.py | 0 python3/{unittest => tests}/test_usb_scan.py | 4 ++-- 6 files changed, 5 insertions(+), 5 deletions(-) rename python3/{unittest => tests}/test_hfx_filename.py (100%) rename python3/{unittest => tests}/test_nbd_client_manager.py (100%) rename python3/{unittest => tests}/test_perfmon.py (100%) rename python3/{unittest => tests}/test_usb_scan.py (99%) diff --git a/.codecov.yml b/.codecov.yml index 79d69aa0b14..f67f6913dc8 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -214,7 +214,7 @@ coverage: tests: # Ensure that all tests are executed (tests themselves must be 100% covered) target: 98% - paths: ["python3/unittest/test_*.py"] + paths: ["python3/tests/test_*.py"] # @@ -266,5 +266,5 @@ component_management: - component_id: test_cases name: test_cases - paths: ["python3/unittest/test_*.py"] + paths: ["python3/tests/test_*.py"] diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 9d55ec60312..da0e2bd35a2 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -73,7 +73,7 @@ jobs: run: > pytest --cov=python3/ - python3/unittest python3/tests -vv -rA + python3/tests -vv -rA --junitxml=.git/pytest${{matrix.python-version}}.xml --cov-report term-missing --cov-report xml:.git/coverage${{matrix.python-version}}.xml diff --git a/python3/unittest/test_hfx_filename.py b/python3/tests/test_hfx_filename.py similarity index 100% rename from python3/unittest/test_hfx_filename.py rename to python3/tests/test_hfx_filename.py diff --git a/python3/unittest/test_nbd_client_manager.py b/python3/tests/test_nbd_client_manager.py similarity index 100% rename from python3/unittest/test_nbd_client_manager.py rename to python3/tests/test_nbd_client_manager.py diff --git a/python3/unittest/test_perfmon.py b/python3/tests/test_perfmon.py similarity index 100% rename from python3/unittest/test_perfmon.py rename to python3/tests/test_perfmon.py diff 
--git a/python3/unittest/test_usb_scan.py b/python3/tests/test_usb_scan.py similarity index 99% rename from python3/unittest/test_usb_scan.py rename to python3/tests/test_usb_scan.py index e5ee00a253a..f63e4bb8f10 100644 --- a/python3/unittest/test_usb_scan.py +++ b/python3/tests/test_usb_scan.py @@ -13,10 +13,11 @@ import mock from python3.tests.import_helper import import_file_as_module - +# mock modules to avoid dependencies sys.modules["xcp"] = mock.Mock() sys.modules["xcp.logger"] = mock.Mock() sys.modules["pyudev"] = mock.Mock() +usb_scan = import_file_as_module("python3/libexec/usb_scan.py") class MocDeviceAttrs(Mapping): @@ -112,7 +113,6 @@ def verify_usb_common( # Use relative path to allow tests to be started in subdirectories path = os.path.dirname(__file__) + "/../../scripts/usb-policy.conf" ): - usb_scan = import_file_as_module("python3/libexec/usb_scan.py") mock_setup(usb_scan, moc_devices, moc_interfaces, path) From 0c96f1d4d68ad6070b2464b3ce970500487cb423 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 6 May 2024 12:00:00 +0200 Subject: [PATCH 055/341] .codecov.yml: Update Coverity config to cleanup obsolete python2 components Signed-off-by: Bernhard Kaindl --- .codecov.yml | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index f67f6913dc8..c0092974257 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -184,14 +184,6 @@ coverage: # project: - # - # Python modules and scripts below scripts/ (excluding tests) - # - scripts: - paths: ["scripts/**", "!**/test_*.py"] - target: 48% - threshold: 2% - # # Python modules and scripts below ocaml/ (excluding tests) # @@ -240,15 +232,6 @@ component_management: individual_components: - - component_id: scripts # this is an identifier that should not be changed - name: scripts # this is a display name, and can be changed freely - # The list of paths that should be in- and excluded in this component: - paths: ["scripts/**", "!scripts/examples/**", 
"!**/test_*.py"] - - - component_id: scripts/examples - name: scripts/examples - paths: ["scripts/examples/**", "!scripts/**/test_*.py"] - - component_id: ocaml name: ocaml paths: ["ocaml/**", "!**/test_*.py"] From dcab8cb66f9fa4c5b7fef2e24e4fa7c27d5310ce Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 9 May 2024 12:00:00 +0200 Subject: [PATCH 056/341] Fixup the merge: fix duplicated pylint config and fix pytype with pyudev Signed-off-by: Bernhard Kaindl --- .github/workflows/other.yml | 2 +- pyproject.toml | 64 +++++++++++++++++----------- python3/libexec/usb_reset.py | 2 +- python3/libexec/usb_scan.py | 4 +- python3/tests/import_helper.py | 2 +- python3/tests/test_observer.py | 2 +- scripts/examples/python/provision.py | 4 +- scripts/mail-alarm | 2 +- 8 files changed, 48 insertions(+), 34 deletions(-) diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index d65b7abe575..0284e7d7819 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -51,7 +51,7 @@ jobs: - name: Install dependencies only needed for python 3 if: ${{ matrix.python-version != '2.7' }} - run: pip install opentelemetry-api opentelemetry-exporter-zipkin-json opentelemetry-sdk pandas pytype toml wrapt + run: pip install opentelemetry-api opentelemetry-exporter-zipkin-json opentelemetry-sdk pandas pytype toml wrapt pyudev - name: Install common dependencies for Python ${{matrix.python-version}} run: pip install future mock pytest-coverage pytest-mock diff --git a/pyproject.toml b/pyproject.toml index 4c4a855c59e..2eed863da0b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -88,28 +88,6 @@ profile = "black" combine_as_imports = true ensure_newline_before_comments = false -[tool.pylint.messages_control] -disable = [ - "missing-function-docstring", - "missing-module-docstring", - "missing-class-docstring", - "consider-using-f-string", - "too-many-branches", - "too-many-arguments", - "broad-exception-caught", - "no-else-break", - "no-else-return", - 
"invalid-name", - "import-error", - "unnecessary-pass", - "unspecified-encoding", - "protected-access", - "no-member", # Some mutiple inheritance classes may have this issue - "too-many-locals", # Long functions. Need to refine the code - "too-many-statements", - "too-many-return-statements" -] - # ----------------------------------------------------------------------------- # Mypy static analysis - https://mypy.readthedocs.io/en/stable/config_file.html # ----------------------------------------------------------------------------- @@ -257,16 +235,52 @@ discard_messages_matching = [ "No attribute 'group' on None", "No Node.TEXT_NODE in module xml.dom.minidom, referenced from 'xml.dom.expatbuilder'" ] -expected_to_fail = [] - +expected_to_fail = [ + # Need 2to3 -w and maybe a few other minor updates: + "scripts/hatests", + "scripts/backup-sr-metadata.py", + "scripts/restore-sr-metadata.py", + # SSLSocket.send() only accepts bytes, not unicode string as argument: + "scripts/examples/python/exportimport.py", + # Other fixes needed: + "scripts/examples/python/mini-xenrt.py", + "scripts/examples/python/XenAPI/XenAPI.py", + "scripts/examples/python/monitor-unwanted-domains.py", + "scripts/examples/python/shell.py", + "scripts/examples/smapiv2.py", + "scripts/static-vdis", + "scripts/plugins/extauth-hook-AD.py", +] [tool.pytype] inputs = [ + # Python 3 "python3/", "ocaml/xcp-rrdd", + # Python2: These will generate warnings that need to be fixed: + "scripts/static-vdis", + "scripts/generate-iscsi-iqn", + "scripts/hatests", + "scripts/host-display", + "scripts/mail-alarm", + "scripts/print-custom-templates", + "scripts/probe-device-for-file", + "scripts/xe-reset-networking", + "scripts/xe-scsi-dev-map", + "scripts/examples/python", + "scripts/yum-plugins", + "scripts/*.py", + + # To be added later, + # when converted to Python3-compatible syntax: + # "ocaml/message-switch/python", + # "ocaml/idl/ocaml_backend/python", + # "ocaml/xapi-storage/python", ] disable = [ + # 
Reduce noise from python2 scripts(import yum, xenfsimage, xcp, urlgrabber) + "import-error", ] platform = "linux" # Allow pytype to find the XenAPI module, the rrdd module and python3 modules: -pythonpath = "python3:scripts/examples/python:ocaml/xcp-rrdd/scripts/rrdd" +pythonpath = "scripts/examples/python:.:scripts:scripts/plugins:scripts/examples" diff --git a/python3/libexec/usb_reset.py b/python3/libexec/usb_reset.py index 82a690bcea0..8d96dde369e 100755 --- a/python3/libexec/usb_reset.py +++ b/python3/libexec/usb_reset.py @@ -47,7 +47,7 @@ import errno import fcntl import grp -import xcp.logger as log +import xcp.logger as log # pytype: disable=import-error import logging import os import pwd diff --git a/python3/libexec/usb_scan.py b/python3/libexec/usb_scan.py index e940aa626f5..c45686f6404 100755 --- a/python3/libexec/usb_scan.py +++ b/python3/libexec/usb_scan.py @@ -30,7 +30,7 @@ import sys import pyudev -import xcp.logger as log +import xcp.logger as log # pytype: disable=import-error def log_list(l): @@ -56,7 +56,7 @@ def hex_equal(h1, h2): return False -class UsbObject(dict): +class UsbObject(dict): # pytype: disable=ignored-metaclass """Base class of USB classes, save USB properties in dict node(str): the key, device node diff --git a/python3/tests/import_helper.py b/python3/tests/import_helper.py index 87541c9b6cf..076a24913c7 100644 --- a/python3/tests/import_helper.py +++ b/python3/tests/import_helper.py @@ -9,7 +9,7 @@ @contextmanager -def mocked_modules(*module_names): # type:(str) -> Generator[None, None, None] +def mocked_modules(*module_names: str) -> Generator[None, None, None]: """Context manager that temporarily mocks the specified modules. :param module_names: Variable number of names of the modules to be mocked. 
diff --git a/python3/tests/test_observer.py b/python3/tests/test_observer.py index cdd7f7e143f..a8d6f238eec 100644 --- a/python3/tests/test_observer.py +++ b/python3/tests/test_observer.py @@ -4,7 +4,7 @@ import sys import unittest -from mock import MagicMock, mock_open, patch +from unittest.mock import MagicMock, mock_open, patch # Ensure observer is initialised as noop with patch("os.listdir") as mock_listdir: diff --git a/scripts/examples/python/provision.py b/scripts/examples/python/provision.py index b8aa3f3935f..4c5ab11daef 100644 --- a/scripts/examples/python/provision.py +++ b/scripts/examples/python/provision.py @@ -62,7 +62,7 @@ def setSR(self, sr): def parseProvisionSpec(txt): """Return an instance of type ProvisionSpec given XML text""" - doc = xml.dom.minidom.parseString(txt) + doc = xml.dom.minidom.parseString(txt) # pytype: disable=pyi-error all = doc.getElementsByTagName("provision") if len(all) != 1: raise ValueError("Expected to find exactly one element") @@ -74,7 +74,7 @@ def parseProvisionSpec(txt): def printProvisionSpec(ps): """Return a string containing pretty-printed XML corresponding to the supplied provisioning spec""" - doc = xml.dom.minidom.Document() + doc = xml.dom.minidom.Document() # pytype: disable=pyi-error doc.appendChild(ps.toElement(doc)) return doc.toprettyxml() diff --git a/scripts/mail-alarm b/scripts/mail-alarm index 5fd432339bf..0b41dd5e0e9 100755 --- a/scripts/mail-alarm +++ b/scripts/mail-alarm @@ -21,7 +21,7 @@ import syslog import tempfile import traceback from socket import getfqdn -from xml.dom import minidom +from xml.dom import minidom # pytype: disable=pyi-error import XenAPI from xcp import branding From 6545e1da25b1bedaedde94e8474e719c91dd94d4 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 9 May 2024 12:00:00 +0200 Subject: [PATCH 057/341] CA-390883: python3/tests: Fix remaining pylint and pyright comments in python3/*.py Signed-off-by: Bernhard Kaindl --- python3/bin/hfx_filename | 8 +++++++- 
python3/libexec/nbd_client_manager.py | 2 +- python3/libexec/usb_reset.py | 2 +- python3/libexec/usb_scan.py | 9 ++++++--- python3/tests/test_perfmon.py | 2 ++ python3/tests/test_usb_scan.py | 11 +++++------ scripts/test_mail-alarm.py | 4 ++++ 7 files changed, 26 insertions(+), 12 deletions(-) diff --git a/python3/bin/hfx_filename b/python3/bin/hfx_filename index dd8677fc499..28fb05bbc78 100755 --- a/python3/bin/hfx_filename +++ b/python3/bin/hfx_filename @@ -14,8 +14,14 @@ # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# pylint: disable=redefined-outer-name +# pyright: reportFunctionMemberAccess=false +# pyright: reportOptionalMemberAccess=false, reportAttributeAccessIssue=false -import sys, socket, urllib.request, XenAPI +import sys +import socket + +import XenAPI db_url = "/remote_db_access" diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index e30477316d8..0f77e69b12e 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -136,7 +136,7 @@ def _find_unused_nbd_device(): return nbd_device # If there are 1000 nbd devices (unlikely) and all are connected - raise NbdDeviceNotFound(nbd_device) + raise NbdDeviceNotFound(nbd_device) # pyright:ignore[reportPossiblyUnboundVariable] def _wait_for_nbd_device(nbd_device, connected): deadline = datetime.now() + timedelta(minutes=MAX_DEVICE_WAIT_MINUTES) diff --git a/python3/libexec/usb_reset.py b/python3/libexec/usb_reset.py index 8d96dde369e..573936ae1c3 100755 --- a/python3/libexec/usb_reset.py +++ b/python3/libexec/usb_reset.py @@ -132,7 +132,7 @@ def load_device_ids(device): # ignore and continue log.warning("Failed to remove device ids: {}".format(str(e))) - return uid, gid + return uid, gid # pyright: ignore[reportPossiblyUnboundVariable] # pragma: no cover # throw IOError, ValueError diff --git a/python3/libexec/usb_scan.py 
b/python3/libexec/usb_scan.py index c45686f6404..03d89f7baed 100755 --- a/python3/libexec/usb_scan.py +++ b/python3/libexec/usb_scan.py @@ -21,6 +21,8 @@ # 2. check if device can be passed through based on policy file # 3. return the device info to XAPI in json format +# pylint: disable=redefined-outer-name +# pyright: reportPossiblyUnboundVariable=false, reportAttributeAccessIssue=false import abc import argparse @@ -71,7 +73,7 @@ def __init__(self, node): def get_node(self): return self.node - def __hash__(self): + def __hash__(self): # pyright:ignore[reportIncompatibleVariableOverride] return hash(self.node) def __eq__(self, other): @@ -109,14 +111,14 @@ def _is_class_hub(self, key_class): return cls is not None and hex_equal(__VALUE_CLASS_HUB, cls) @abc.abstractmethod - def is_class_hub(self): + def is_class_hub(self) -> bool: """check if this belongs to a hub :return: bool, if this belongs to a hub """ @abc.abstractmethod - def is_child_of(self, parent): + def is_child_of(self, parent) -> bool: """check if this is a child of parent :param parent:(UsbObject) the parent to check against @@ -282,6 +284,7 @@ def __init__(self, node, props): if props.get(p) is not None: self[p] = props.get(p).decode() + # pylint: disable-next=useless-parent-delegation # This parent call is superfluous def debug_str(self, level=0): return super().debug_str(level) diff --git a/python3/tests/test_perfmon.py b/python3/tests/test_perfmon.py index 9d638f4fab4..c133a1171ac 100644 --- a/python3/tests/test_perfmon.py +++ b/python3/tests/test_perfmon.py @@ -3,6 +3,8 @@ This module provides unittest for perfmon """ +# pyright: reportAttributeAccessIssue=false + import sys import math import unittest diff --git a/python3/tests/test_usb_scan.py b/python3/tests/test_usb_scan.py index f63e4bb8f10..bf0bad03fef 100644 --- a/python3/tests/test_usb_scan.py +++ b/python3/tests/test_usb_scan.py @@ -25,8 +25,7 @@ def __init__(self, device): self.d = device.get_attr() def __iter__(self): # pragma: no 
cover - for name in self.d: - yield name + yield from self.d def __len__(self): # pragma: no cover return len(self.d) @@ -54,8 +53,7 @@ def attributes(self): return MocDeviceAttrs(self) def __iter__(self): # pragma: no cover - for name in self.get_prop(): - yield name + yield from self.get_prop() def __len__(self): # pragma: no cover return len(self.get_prop()) @@ -64,7 +62,7 @@ def __getitem__(self, name): return self.get_prop().get(name) -class MocEnumerator(object): +class MocEnumerator(): def __init__(self, ds): self.ds = ds @@ -73,7 +71,7 @@ def __iter__(self): yield MocDevice(d) -class MocContext(object): +class MocContext(): def __init__(self, devices, interfaces): self.devices = devices self.interfaces = interfaces @@ -85,6 +83,7 @@ def list_devices(self, **kwargs): return MocEnumerator(self.devices) elif dev_type == "usb_interface": return MocEnumerator(self.interfaces) + raise AssertionError(f"unexpected {dev_type}") # pragma: no cover def mock_setup(mod, devices, interfaces, path): diff --git a/scripts/test_mail-alarm.py b/scripts/test_mail-alarm.py index 2a918f5edbe..acd5f5f20a5 100644 --- a/scripts/test_mail-alarm.py +++ b/scripts/test_mail-alarm.py @@ -8,6 +8,10 @@ import sys import unittest import mock +import pytest + +if sys.version_info > (2, ): + pytest.skip(allow_module_level=True) def nottest(obj): obj.__test__ = False From 816157241572f7a42bc231601d8cc087b29900b6 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 9 May 2024 12:00:00 +0200 Subject: [PATCH 058/341] CA-390883: pyproject.toml: Add config for pytest & other local checks Signed-off-by: Bernhard Kaindl --- pyproject.toml | 96 ++++++++++++++++++++--------------- python3/stubs/XenAPI.pyi | 85 +++++++++++++++++++++++++++++++ python3/stubs/xcp/branding.py | 38 ++++++++++++++ python3/stubs/xcp/logger.pyi | 6 +++ pytype_reporter.py | 5 +- 5 files changed, 186 insertions(+), 44 deletions(-) create mode 100644 python3/stubs/XenAPI.pyi create mode 100644 python3/stubs/xcp/branding.py 
create mode 100644 python3/stubs/xcp/logger.pyi diff --git a/pyproject.toml b/pyproject.toml index 2eed863da0b..5ea22b96551 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,11 +34,15 @@ line-length = 88 [tool.coverage.report] # Here, developers can configure which lines do not need to be covered by tests: +# fail_under: minimum code coverage percentage +fail_under = 50 +# exclude_lines: lines that are not required to be covered exclude_lines = [ "pragma: no cover", # standard pragma for not covering a line or block "if TYPE_CHECKING:", # imports for type checking only "pass", # Other specific lines that do not need to be covered, comment in which file: + "raise NbdDeviceNotFound", # python3/libexec/usb_scan.py ] # precision digits to use when reporting coverage (sub-percent-digits are not reported): precision = 0 @@ -166,69 +170,73 @@ disable = [ # ----------------------------------------------------------------------------- # Pyright is the static analysis behind the VSCode Python extension / Pylance -# https://microsoft.github.io/pyright/#/configuration?id=main-configuration-options +# https://microsoft.github.io/pyright/#/configuration # ----------------------------------------------------------------------------- [tool.pyright] -# Specifies the paths of directories or files that should be included in the -# analysis. If no paths are specified, all files in the workspace are included: -include = ["python3", "ocaml/xcp-rrdd"] - -# Conditionalize the stube files for type definitions based on the platform: -pythonPlatform = "Linux" - -# typeCheckingMode: "off", "basic", "standard" or "strict" -typeCheckingMode = "standard" - -# Specifies the version of Python that will be used to execute the source code. -# Generate errors if the source code makes use of language features that are -# not supported in that version. It will also tailor its use of type stub files, -# which conditionalizes type definitions based on the version. 
If no version is -# specified, pyright will use the version of the current python interpreter, -# if one is present: -pythonVersion = "3.6" - -# Paths of directories or files that should use "strict" analysis if they are -# included. This is the same as manually adding a "# pyright: strict" comment. -# In strict mode, most type-checking rules are enabled, and the type-checker -# will be more aggressive in inferring types. If no paths are specified, strict -# mode is not enabled: -strict = ["python3/tests/test_observer.py"] - -# -# Paths to exclude from analysis. If a file is excluded, it will not be -# analyzed. -# -# FIXME: Some of these may have type errors, so they should be inspected and fixed: -# -exclude = [ +# include: directories to include in checking +# strict: paths for which strict checking works +# typeCheckingMode: set the standard type checking mode +include = ["python3", "ocaml/xcp-rrdd"] +strict = ["python3/tests/observer"] +pythonPlatform = "Linux" +typeCheckingMode = "standard" +reportMissingImports = false +pythonVersion = "3.6" +exclude = [ "ocaml/xcp-rrdd/scripts/rrdd/rrdd.py", "ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py", "python3/packages/observer.py", - "python3/tests/pytype_reporter.py", ] # ----------------------------------------------------------------------------- # Pytest is the test framework, for discovering and running tests, fixtures etc -# https://pytest.readthedocs.io/en/latest/customize.html +# https://pytest.readthedocs.io/en/latest/customize.html, https://docs.pytest.org # ----------------------------------------------------------------------------- - [tool.pytest.ini_options] -addopts = "-ra" # Show the output of all tests, including those that passed -log_cli = true # Capture log messages and show them in the output as well +# ----------------------------------------------------------------------------- +# Options to enable for pytest by default: +# -v show what happens +# -rA show summary after running tests +# 
--cov=python3 measure coverage of the python3 directory +# --cov-fail-under minimum coverage percentage +# --cov-report=term-missing show missing lines in the coverage report +# --cov-report=html: generate an HTML coverage report(for viewing) +# --cov-report=xml: generate an XML coverage report(for upload) +# ----------------------------------------------------------------------------- +addopts = """ +-v -rA --cov=python3 --cov=scripts --cov-fail-under=50 +--cov-report=html:.git/coverage --cov-report=term-missing +--cov-report=xml:.git/coverage3.11.xml +""" + +# ----------------------------------------------------------------------------- +# Other pytest config options: +# log_cli: show logger messages +# log_cli_level: log level to show +# python_files: pattern for test files +# python_functions: pattern for test functions +# testpaths: directories to search for tests +# minversion: this config requires pytest>=7 to configure pythonpath +# pythonpath: path to stub files and typing stubs for tests +# xfail_strict: require to remove pytext.xfail marker when test is fixed +# required_plugins: require that these plugins are installed before testing +# ----------------------------------------------------------------------------- +testpaths = ["python3", "scripts", "ocaml/xcp-rrdd"] +required_plugins = ["pytest-cov", "pytest-mock"] log_cli_level = "INFO" +log_cli = true +minversion = "7.0" +pythonpath = "python3/stubs:scripts/examples/python" # Allow to import the XenAPI module python_files = ["test_*.py", "it_*.py"] python_functions = ["test_", "it_", "when_"] -pythonpath = "scripts/examples/python" # Allows to import the XenAPI module -required_plugins = ["pytest-mock"] -testpaths = ["python3", "scripts", "ocaml/xcp-rrdd"] xfail_strict = true # is used to fail tests that are marked as xfail but pass(for TDD) [tool.pytype_reporter] -default_branch = "master" +default_branch = "feature/py3" discard_messages_matching = [ "Couldn't import pyi for 'xml.dom.minidom'", "No 
attribute '.*' on RRDContentHandler", @@ -252,6 +260,10 @@ expected_to_fail = [ "scripts/plugins/extauth-hook-AD.py", ] +# ----------------------------------------------------------------------------- +# pytype: Google's static type analyzer - https://google.github.io/pytype/ +# ----------------------------------------------------------------------------- + [tool.pytype] inputs = [ # Python 3 diff --git a/python3/stubs/XenAPI.pyi b/python3/stubs/XenAPI.pyi new file mode 100644 index 00000000000..4590e614814 --- /dev/null +++ b/python3/stubs/XenAPI.pyi @@ -0,0 +1,85 @@ +""" +Stub for the XenAPI module: https://xapi-project.github.io/xen-api/overview.html +""" + + +import http.client as httplib +import xmlrpc.client as xmlrpclib +from _typeshed import Incomplete as Incomplete + +translation: Incomplete +API_VERSION_1_1: str +API_VERSION_1_2: str + + +class Failure(Exception): + details: Incomplete + + def __init__(self, details) -> None: ... + + +class UDSHTTPConnection(httplib.HTTPConnection): + sock: Incomplete + + def connect(self) -> None: ... + + +class UDSTransport(xmlrpclib.Transport): + def add_extra_header(self, key, value) -> None: ... + + # def make_connection(self, host) -> None: ... + + +def notimplemented(name, *args, **kwargs) -> None: ... + + +class _Dispatcher: + """A dispatcher for the Xen-API. It is used to call methods on the server""" + def __init__(self, API_version, send, name) -> None: ... + def __getattr__(self, name) -> None: ... + def __call__(self, *args) -> None: ... + def login_with_password(self, username, password, version, client_name) -> None: + """Authenticate the session with the XenAPI server.""" + def logout(self) -> None: + """End the session with the XenAPI server.""" + session: Incomplete + secret: Incomplete + SR: Incomplete + PBD: Incomplete + pool: Incomplete + VM: Incomplete + + +class Session(xmlrpclib.ServerProxy): + """A server proxy and session manager for communicating with xapi using + the Xen-API. 
+ + Example: + + session = Session('http://localhost/') + session.login_with_password('me', 'mypassword', '1.0', 'xen-api-scripts-xenapi.py') + session.xenapi.VM.start(vm_uuid) + session.xenapi.session.logout() + """ + + transport: Incomplete + last_login_method: Incomplete + last_login_params: Incomplete + API_version: Incomplete + xenapi: _Dispatcher + + def __init__( + self, + uri, + transport: Incomplete | None = ..., + encoding: Incomplete | None = ..., + verbose: int = ..., + allow_none: int = ..., + ignore_ssl: bool = ..., + ) -> None: ... + def xenapi_request(self, methodname, params) -> None: ... + + # def __getattr__(self, name) -> None: ... + + +def xapi_local() -> Session: ... diff --git a/python3/stubs/xcp/branding.py b/python3/stubs/xcp/branding.py new file mode 100644 index 00000000000..30ff69600bf --- /dev/null +++ b/python3/stubs/xcp/branding.py @@ -0,0 +1,38 @@ +# Example xcp.branding module as test stub for test mail-alarm. +# python3/stubs is added to PYTHONPATH by pyproject.toml +COPYRIGHT_YEARS = '2009-2024' +PRODUCT_BRAND = 'XenServer' +PRODUCT_BRAND_DASHED = 'xenserver' +PRODUCT_NAME = 'xenenterprise' +COMPANY_NAME_LEGAL = 'Cloud Software Group, Inc.' +COMPANY_NAME_SHORT = 'Cloud Software Group' +COMPANY_DOMAIN = 'xenserver.com' +COMPANY_PRODUCT_BRAND = 'XenServer' +BRAND_CONSOLE = 'XenCenter' +BRAND_SERVER = 'XenServer Host' +BRAND_VDI = 'Virtual Desktops' +BRAND_CONSOLE_URL = 'https://www.xenserver.com/downloads' +ISO_PV_TOOLS_COPYRIGHT = 'Cloud Software Group, Inc. 2009-2024' +ISO_PV_TOOLS_LABEL = 'XenServer VM Tools' +COMPANY_NAME = 'Cloud Software Group, Inc.' +COMPANY = 'Cloud Software Group' +COMPANY_WEBSITE = 'www.xenserver.com' +PLATFORM_NAME = 'XCP' +PLATFORM_ORGANISATION = 'xen.org' +PLATFORM_WEBSITE = 'www.xen.org' +BRAND_GUEST = 'Virtual Machine' +BRAND_GUESTS = 'Virtual Machines' +BRAND_GUEST_SHORT = 'VM' +BRAND_GUESTS_SHORT = 'VMs' +BRAND_SERVERS = 'XenServer Hosts' +ISO_PV_TOOLS_PUBLISHER = 'Cloud Software Group, Inc.' 
+PRODUCT_MAJOR_VERSION = '8' +PRODUCT_MINOR_VERSION = '4' +PRODUCT_MICRO_VERSION = '0' +PRODUCT_VERSION_TEXT = '8' +PRODUCT_VERSION_TEXT_SHORT = '8' +PLATFORM_MAJOR_VERSION = '3' +PLATFORM_MINOR_VERSION = '4' +PLATFORM_MICRO_VERSION = '0' +PLATFORM_VERSION = '3.4.0' +PRODUCT_VERSION = '8.4.0' diff --git a/python3/stubs/xcp/logger.pyi b/python3/stubs/xcp/logger.pyi new file mode 100644 index 00000000000..f4aa2dab371 --- /dev/null +++ b/python3/stubs/xcp/logger.pyi @@ -0,0 +1,6 @@ +# Minimal stub for xcp.logger module +def debug(*al, **ad) -> None: ... +def error(*al, **ad) -> None: ... +def warning(*al, **ad) -> None: ... +def logToStdout(level) -> bool: ... +def logToSyslog(level) -> bool: ... diff --git a/pytype_reporter.py b/pytype_reporter.py index 877dc29c9d8..4e7d91f172b 100755 --- a/pytype_reporter.py +++ b/pytype_reporter.py @@ -599,10 +599,11 @@ def main(): config_file = "pyproject.toml" config = load_config(config_file, basename(__file__)) config.setdefault("expected_to_fail", []) - debug("Expected to fail: %s", ", ".join(config["expected_to_fail"])) changed_but_in_expected_to_fail = [] - if config["expected_to_fail"] != []: + if config["expected_to_fail"]: + debug("Expected to fail: %s", ", ".join(config["expected_to_fail"])) + changed_but_in_expected_to_fail = git_diff( "--name-only", find_branch_point(config), From c44c4a472ceb5ab5459ba139ec102a881b5ef8af Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 8 May 2024 12:00:00 +0200 Subject: [PATCH 059/341] CA-390883: .pre-commit-config.yaml: Add venvs for running local tests Signed-off-by: Bernhard Kaindl --- .pre-commit-config.yaml | 101 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 100 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0ca5ef37fee..668b4190ce1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,6 +14,8 @@ # pre-commit run -av --hook-stage pre-push # default_stages: [commit, push] 
+default_language_version: + python: python3.11 repos: # Recommendation for a minimal git pre-commit hook: # https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md: @@ -29,6 +31,95 @@ repos: - id: check-executables-have-shebangs exclude: ocaml + +# Improve Python formatting incrementally: +# https://dev.to/akaihola/improving-python-code-incrementally-3f7a +# +# darker checks if staged python changes are formatted according using +# the PEP8-aligned black formatter. It also checks if the imports are sorted. +# +# It is a good idea to run this before committing, and it is also run in the +# GitHub Workflow. +# +# Note: darker only checks the changes in files ending in .py! +# Python scripts that don't end in .py should be renamed to have the .py extension +# when moving them to python3/bin. +# (remove the .py extension in the Makefile when installing the file) +# +- repo: https://github.com/akaihola/darker + rev: 1.7.3 + hooks: + - id: darker + files: python3/ + name: check changes in Python3 tree using darker and isort + args: [--diff, --skip-string-normalization, --isort, -tpy36] + additional_dependencies: [isort] + +# +# Run pytest and diff-cover to check that the new /python3 test suite in passes. +# This hook uses a local venv containing the required dependencies. When adding +# new dependencies, they should be added to the additional_dependencies below. 
+# +- repo: local + hooks: + - id: pytest + files: python3/ + name: check that the Python3 test suite in passes + entry: env PYTHONDEVMODE=yes sh -c 'python3 -m pytest -vv && + diff-cover --ignore-whitespace --compare-branch=origin/feature/py3 + --show-uncovered --html-report .git/coverage-diff.html + --fail-under 50 .git/coverage3.11.xml' + require_serial: true + pass_filenames: false + language: python + types: [python] + additional_dependencies: + - coverage + - diff-cover + - future + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pytest-coverage + - pytest-mock + - mock + - wrapt + - XenAPI + + +- repo: https://github.com/RobertCraigie/pyright-python + rev: v1.1.361 + hooks: + - id: pyright + name: check that python3 tree passes pyright/VSCode check + files: python3/ + additional_dependencies: + - mock + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pytest + - pyudev + - XenAPI + + +# Check that pylint passes for the changes in new /python3 code. +- repo: local + hooks: + - id: pylint + files: python3/ + stages: [push] + name: check that changes to python3 tree pass pylint + entry: diff-quality --violations=pylint + --ignore-whitespace --compare-branch=origin/feature/py3 + pass_filenames: false + language: python + types: [python] + additional_dependencies: [diff-cover, pylint, pytest] + + +# pre-push hook (it only runs if you install pre-commit as a pre-push hook): +# It can be manually tested using: `pre-commit run -av --hook-stage push` # Recommendation for a minimal git pre-push hook: # While using pre-commit yields great results, it # is "not fast". 
Therefore only run it pre-push, @@ -53,4 +144,12 @@ repos: # developers have such version installed, it can be configured here: # language_version: python3.11 require_serial: true - additional_dependencies: [pandas, pytype] + additional_dependencies: + - future + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pandas + - pytest + - pytype + files: python3/ From 9b128cda5e0024e4d9133a6fdcdb2c77cdb6f0cc Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 7 May 2024 12:00:00 +0200 Subject: [PATCH 060/341] CA-390883: Simplify GitHub Workflow to re-use venvs from pre-commit Signed-off-by: Bernhard Kaindl --- .github/workflows/other.yml | 32 +++----------------------------- 1 file changed, 3 insertions(+), 29 deletions(-) diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 0284e7d7819..7c00b893e4a 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -45,40 +45,14 @@ jobs: env: SKIP: no-commit-to-branch - - name: Install dependencies only needed for python 2 - if: ${{ matrix.python-version == '2.7' }} - run: pip install enum - - - name: Install dependencies only needed for python 3 - if: ${{ matrix.python-version != '2.7' }} - run: pip install opentelemetry-api opentelemetry-exporter-zipkin-json opentelemetry-sdk pandas pytype toml wrapt pyudev - - - name: Install common dependencies for Python ${{matrix.python-version}} - run: pip install future mock pytest-coverage pytest-mock - - name: Run Pytest for python 2 and get code coverage for Codecov if: ${{ matrix.python-version == '2.7' }} run: > + pip install enum future mock pytest-coverage pytest-mock && pytest - --cov=scripts --cov=ocaml/xcp-rrdd - scripts/ ocaml/xcp-rrdd -vv -rA - --junitxml=.git/pytest${{matrix.python-version}}.xml + --cov=scripts scripts --cov-fail-under 45 -vv -rA --cov-report term-missing --cov-report xml:.git/coverage${{matrix.python-version}}.xml - env: - PYTHONDEVMODE: yes - - - name: Run Pytest for 
python 3 and get code coverage for Codecov - if: ${{ matrix.python-version != '2.7' }} - run: > - pytest - --cov=scripts --cov=ocaml/xcp-rrdd --cov=python3/ - scripts/ ocaml/xcp-rrdd python3/ -vv -rA - --junitxml=.git/pytest${{matrix.python-version}}.xml - --cov-report term-missing - --cov-report xml:.git/coverage${{matrix.python-version}}.xml - env: - PYTHONDEVMODE: yes - name: Upload Python ${{matrix.python-version}} coverage report to Codecov uses: codecov/codecov-action@v3 @@ -102,7 +76,7 @@ jobs: - name: Run pytype checks if: ${{ matrix.python-version != '2.7' }} - run: ./pytype_reporter.py + run: pip install pandas pytype toml && ./pytype_reporter.py env: PR_NUMBER: ${{ github.event.number }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 384b30bd5e577c85e91962461170314c24ec8cb6 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 6 Mar 2024 12:00:00 +0100 Subject: [PATCH 061/341] tests/observer: Cover the changed lines of the merge from master Signed-off-by: Bernhard Kaindl --- python3/packages/observer.py | 2 +- python3/tests/observer/__init__.py | 36 +++++ python3/tests/observer/all.conf | 1 + python3/tests/observer/it_handles_errors.py | 147 ++++++++++++++++++++ python3/tests/observer/it_traces.py | 104 ++++++++++++++ python3/tests/observer/observer.conf | 1 + python3/tests/observer/traced_script.py | 36 +++++ 7 files changed, 326 insertions(+), 1 deletion(-) create mode 100644 python3/tests/observer/__init__.py create mode 100644 python3/tests/observer/all.conf create mode 100644 python3/tests/observer/it_handles_errors.py create mode 100644 python3/tests/observer/it_traces.py create mode 100644 python3/tests/observer/observer.conf create mode 100755 python3/tests/observer/traced_script.py diff --git a/python3/packages/observer.py b/python3/packages/observer.py index 3419c89cd36..8742d21fd97 100644 --- a/python3/packages/observer.py +++ b/python3/packages/observer.py @@ -41,7 +41,7 @@ # We only want to import opentelemetry libraries if 
instrumentation is enabled # pylint: disable=import-outside-toplevel -DEBUG_ENABLED = False +DEBUG_ENABLED = os.getenv("XAPI_TEST") DEFAULT_MODULES = "LVHDSR,XenAPI,SR,SRCommand,util" FORMAT = "observer.py: %(message)s" handler = SysLogHandler(facility="local5", address="/dev/log") diff --git a/python3/tests/observer/__init__.py b/python3/tests/observer/__init__.py new file mode 100644 index 00000000000..dbdea4ed0d7 --- /dev/null +++ b/python3/tests/observer/__init__.py @@ -0,0 +1,36 @@ +""" +Package providing helper definitions and functions like call_observer() +to run python3/packages/observer.py as a script using runpy.run_path(). +""" + +import os +import runpy +import sys + +from typing import Any, Dict + +testdir = os.path.dirname(__file__) +OBSERVER_PY = os.path.relpath(testdir + "/../../packages/observer.py") +TRACED_SCRIPT = os.path.relpath(testdir + "/traced_script.py") +TRACED_SCRIPT_PRINT = "Hello, I am a print() in traced_script.py.\n" + + +def call_observer(*args: str) -> Dict[str, Any]: + """ + Call the observer.py script and return its globals dictionary for checking it + + Note: This is only possible when the script is run using runpy.run_path() + and the script exits normally (does not raise and Exception like SystemExit). + + Features: + - __name__ is set to "__main__", so the module is run as a script. + - sys.argv is set to the passed arguments + - no mocks are used, so the actual observer.py script is run. + - sets os.environ["OBSERVER_DEBUG"] = "True" to enable debug logging + to let the tests check the debug messages for checking the reading + of the configuration files and setting up tracing. 
+ """ + + os.environ["XAPI_TEST"] = "True" # Enable printing debug messages in observer.py + sys.argv = [OBSERVER_PY, *args] + return runpy.run_path(OBSERVER_PY, run_name="__main__") diff --git a/python3/tests/observer/all.conf b/python3/tests/observer/all.conf new file mode 100644 index 00000000000..843d5d7cc72 --- /dev/null +++ b/python3/tests/observer/all.conf @@ -0,0 +1 @@ +module_names=XenAPI,tests.observer.traced_script \ No newline at end of file diff --git a/python3/tests/observer/it_handles_errors.py b/python3/tests/observer/it_handles_errors.py new file mode 100644 index 00000000000..efe58c56c76 --- /dev/null +++ b/python3/tests/observer/it_handles_errors.py @@ -0,0 +1,147 @@ +""" +Test error handing of python3/packages/observer.py, calling it using call_observer() + +This module contains tests for the error handling functionality of the observer.py +script in the python3/packages directory. + +The tests are executed by calling the observer.py script via the call_observer() +function. The primary focus of these tests is to verify the behavior of the observer.py +script when various errors occur. + +The tests included in this module are: + +1. `it_handles_not_finding_the_script`: + + This test verifies that when the observer.py does not find the script to trace + is not found, it exits with the correct exit code and produces the expected output. + +2. `it_prints_exception_traceback`: + + This test verifies that when the traced script raises an exception, the observer.py + script captures the exception traceback and exits with the correct exit code. + +3. `it_shows_the_usage_message`: + + This test verifies that when the observer.py script is called without any arguments, + it exits with the correct exit code and produces the expected output. + +4. `it_handles_error_exit`: + + This test verifies that when the traced script exits with a non-zero exit code, the + observer.py script captures the exit code and produces the expected output. + +5. 
`it_does_not_trace_without_config`: + + This test verifies that when observer.py is called without a configuration + file, it does not trace the traced script and produces the expected output. + +The tests are run using the pytest framework and are executed by calling the +call_observer() function, which simulates running the observer.py script from the +command line. +""" + +import os + +import pytest +from pytest import CaptureFixture + +from . import OBSERVER_PY, TRACED_SCRIPT, TRACED_SCRIPT_PRINT, call_observer + + +def it_handles_not_finding_the_script(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, + and the traced script is not found: + - The test checks that the exit code and the captured output are as expected. + """ + nonexisting_script = "nonexisting_traced_script.py" + with pytest.raises(SystemExit) as exc_info: + call_observer(nonexisting_script, "arg") + + assert exc_info.value.code == 2 # Set as the exit code for a missing script + + # Check that the error message is as expected + with capsys.disabled(): + stderr = capsys.readouterr().err.splitlines() + assert stderr[0] == f"{OBSERVER_PY} {nonexisting_script} arg:" + assert stderr[1] == f"Script not found: {os.getcwd()}/{nonexisting_script}" + + +def it_prints_exception_traceback(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, + and an invalid argument is passed to to the traced script as its argument: + + - The traced script should raise an exception and exit with 139 + - The test checks that the exit code and the captured output are as expected. 
+ """ + with pytest.raises(SystemExit) as exc_info: + call_observer(TRACED_SCRIPT, "not_an_int") + + # 139 is used as the exit code when an Exception in the traced script was caught + assert exc_info.value.code == 139 + + # Check that the error message is as expected + with capsys.disabled(): + stderr = capsys.readouterr().err.splitlines() + assert stderr[0] == f"{OBSERVER_PY} {TRACED_SCRIPT} not_an_int:" + assert stderr[1] == "Exception in the traced script:" + assert stderr[2] == "invalid literal for int() with base 10: 'not_an_int'" + assert stderr[3] == "Traceback (most recent call last):" + + +def it_shows_the_usage_message(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started as a script without any arguments: + - The test checks that the exit code and the captured output are as expected. + """ + + with pytest.raises(SystemExit) as exc_info: + call_observer() + assert exc_info.value.code == 31 + with capsys.disabled(): + stderr = capsys.readouterr().err + assert stderr == f"{OBSERVER_PY}: usage: command argument list\n" + + +def it_handles_error_exit(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, + and the traced script exits with a non-zero exit code: + - The expected exit code is passed to to the traced script as its argument. + - The traced script should print a message and exit with the given exit code. + - The test checks that the exit code and the captured output are as expected. 
+ """ + + # Passing 1 to the traced script will make it print() and exit with code 1 + with pytest.raises(SystemExit) as exc_info: + call_observer(TRACED_SCRIPT, "1") + assert exc_info.value.code == 1 + with capsys.disabled(): + assert capsys.readouterr().out == TRACED_SCRIPT_PRINT + + +def it_does_not_trace_without_config(capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started without a configuration file: + + - The expected exit code is passed to to the traced script as its argument. + - The traced script should print a message and exit with 0 + - The test checks that the exit code and the captured output are as expected. + """ + + # Prepare the environment and run the observer.py script + os.environ["OBSERVER_CONFIG_DIR"] = "nonexisting_config_directory" + + # Passing 0 to the traced script will make it print() and exit with code 0 + globs = call_observer(TRACED_SCRIPT, "0") + + with capsys.disabled(): + assert capsys.readouterr().out == TRACED_SCRIPT_PRINT + + # Check that the observer.py script didn't install the tracing functions + span = globs.get("span") + patch_module = globs.get("patch_module") + assert span and patch_module + assert span.__name__ == "_span_noop" + assert patch_module.__name__ == "_patch_module_noop" diff --git a/python3/tests/observer/it_traces.py b/python3/tests/observer/it_traces.py new file mode 100644 index 00000000000..99179c85a93 --- /dev/null +++ b/python3/tests/observer/it_traces.py @@ -0,0 +1,104 @@ +""" +Test that packages/observer.py, creates a tracer, calling it using call_observer() + +The tests included in this module are: + +1. `it_creates_a_tracer`: + + This test verifies that when the observer.py script is called with a configuration + file, it creates a tracer and sets the span and patch_module functions as expected. + +The tests are run using the pytest framework and are executed by calling the +call_observer() function, which simulates running the observer.py script from the +command line. 
+ +The test directory contains a dummy `observer.conf` (currently empty) configuration +file that is used to enable tracing for the test. +""" + +import os +import types +from typing import Any, Dict + +from pytest import CaptureFixture, LogCaptureFixture + +from . import TRACED_SCRIPT, TRACED_SCRIPT_PRINT, call_observer, testdir + + +def assert_imported_modules(globals_dict_of_observer: Dict[str, Any]): + """Assert that the expected modules were imported by observer.py""" + + observer_modules = globals_dict_of_observer["sys"].modules + imported_modules = [ + "opentelemetry.baggage.propagation", + "opentelemetry.context", + "opentelemetry.exporter.zipkin.json", + "opentelemetry.sdk.resources", + "opentelemetry.sdk.trace.export", + "opentelemetry.trace", + ] + assert all(mod in observer_modules for mod in imported_modules) + + +def it_creates_a_tracer(caplog: LogCaptureFixture, capsys: CaptureFixture[str]): + """ + Given that packages/observer.py is started with a configuration file, it: + - imports the opentelemetry packages [checked by this test] + - reads the configuration file [checked by this test] + - creates a tracer [checked by this test (using caplog)] + - sets the span() and patch_module() [checked by this test] + - runs the traced script [checked by this test] + - traces the script [not yet checked by this test] + """ + os.environ["OBSERVER_CONFIG_DIR"] = os.path.dirname(__file__) + + # Passing 0 to the traced script will make it print() and exit with code 0 + globals_dict_of_observer = call_observer(TRACED_SCRIPT, "0") + + with capsys.disabled(): + # If this test fails in your environment without any changes to the repo, + # check for import errors from observer.py:_init_tracing() in the pytest logs. 
+ + # Get the span and patch_module functions from the module's globals + span = globals_dict_of_observer.get("span") + patch_module = globals_dict_of_observer.get("patch_module") + + # Assert that the span and patch_module are functions + assert callable(span) + assert callable(patch_module) + assert isinstance(span, types.FunctionType) + assert isinstance(patch_module, types.FunctionType) + + # Assert that span and patch_module are the expected tracing functions + assert span.__name__ == "span_of_tracers" + assert span.__qualname__ == "_init_tracing..span_of_tracers" + assert patch_module.__name__ == "_patch_module" + assert patch_module.__qualname__ == "_init_tracing.._patch_module" + + # Assert that the captured output is as expected + assert capsys.readouterr().out == TRACED_SCRIPT_PRINT + + assert_imported_modules(globals_dict_of_observer) + assert_debug_logs(caplog) + + +def assert_debug_logs(caplog: LogCaptureFixture): + """ + Assert that the observer.py script read the configuration file all.conf + by expecting the configuration file and its content in the log messages. 
+ """ + + msg = caplog.messages + if not msg: # pragma: no cover + print("No logs found in caplog, check that debug logging is enabled!") + expected_modules = "{'module_names': 'XenAPI,tests.observer.traced_script'}" + assert msg[1] == f"{testdir}/all.conf: {expected_modules}" + assert msg[2] == "module_names: ['XenAPI', 'tests.observer.traced_script']" + + # Assert that the observer.py script red the observer.conf configuration file + config = """{'otel_resource_attributes': '"key1=value1,key2=value2"'}""" + assert msg[0] == f"configs = ['{testdir}/observer.conf']" + assert msg[3] == f"{testdir}/observer.conf: {config}" + + # Assert that the observer.py script created a tracer + assert msg[4].startswith("tracers=[ "InstrumentMe": + """A method to be traced by packages/observer.py as part of tests""" + + print("Hello, I am a print() in traced_script.py.") + return self + + def return_int(self, return_int: str) -> int: + """A method to be traced by packages/observer.py as part of tests""" + return int(return_int) + + +def main(return_code_string: str) -> int: + """Main of the tested script, to be traced by packages/observer.py.""" + + return InstrumentMe().print().return_int(return_code_string) + + +if __name__ == "__main__": + # Only use sys.exit(ret) raising SystemExit if the return code is not 0 + # to allow test_observer_as_script() to get the globals of observer.py: + ret = main(sys.argv[-1]) + if ret: + sys.exit(ret) From b9735568920272e38fbac137ccde42f61fd1f76f Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 21 May 2024 04:26:21 +0100 Subject: [PATCH 062/341] Disable pylint warnings Signed-off-by: Stephen Cheng --- python3/tests/test_usb_scan.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python3/tests/test_usb_scan.py b/python3/tests/test_usb_scan.py index bf0bad03fef..ad72c0cd928 100644 --- a/python3/tests/test_usb_scan.py +++ b/python3/tests/test_usb_scan.py @@ -61,7 +61,7 @@ def __len__(self): # pragma: no cover def 
__getitem__(self, name): return self.get_prop().get(name) - +# pylint: disable=too-few-public-methods class MocEnumerator(): def __init__(self, ds): self.ds = ds @@ -70,7 +70,7 @@ def __iter__(self): for d in self.ds: yield MocDevice(d) - +# pylint: disable=too-few-public-methods class MocContext(): def __init__(self, devices, interfaces): self.devices = devices From 256fe98b61d79010e459a0d581966294996ed054 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Fri, 24 May 2024 12:24:11 +0000 Subject: [PATCH 063/341] CP-47869: Updated documentation to include python3 /doc/content/xapi/storage/_index.md b/doc/content/xapi/storage/_index.md Signed-off-by: Ashwinh --- doc/content/xapi/storage/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/content/xapi/storage/_index.md b/doc/content/xapi/storage/_index.md index c265353869a..009ceabd4bd 100644 --- a/doc/content/xapi/storage/_index.md +++ b/doc/content/xapi/storage/_index.md @@ -245,7 +245,7 @@ From this interface we generate and appear in the` _build/default/python/xapi/storage/api/v5` directory. - On a XenServer host, they are stored in the - `/usr/lib/python2.7/site-packages/xapi/storage/api/v5/` + `/usr/lib/python3.6/site-packages/xapi/storage/api/v5/` directory ### SMAPIv3 Plugins From f785993f244c0b2234f68338a3478d908bc4a9f4 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Fri, 24 May 2024 12:26:48 +0000 Subject: [PATCH 064/341] CP-47869: Updated documentation to include python3 /ocaml/doc/wire-protocol.md b/ocaml/doc/wire-protocol.md Signed-off-by: Ashwinh --- ocaml/doc/wire-protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/doc/wire-protocol.md b/ocaml/doc/wire-protocol.md index 20e39627cc3..cc5734e76b8 100644 --- a/ocaml/doc/wire-protocol.md +++ b/ocaml/doc/wire-protocol.md @@ -463,7 +463,7 @@ XML-RPC and JSON-RPC client libraries. 
First, initialise python: ```bash -$ python2.7 +$ python3 >>> ``` From c3d96a2819abe1b089df15d54c984bbcb9978195 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Fri, 24 May 2024 12:55:45 +0000 Subject: [PATCH 065/341] CP-47869: Modified code using 2to3 /ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate Signed-off-by: Ashwinh --- .../examples/datapath/block/Datapath.activate | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate index 3115f233480..9cda8c0cf23 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate +++ b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.activate @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") @@ -17,10 +17,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.activate(request) - print json.dumps(results) + print(json.dumps(results)) From 8e4b50d36fe07aea0f7d0a82196becd461e34187 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Fri, 24 May 2024 13:01:21 +0000 Subject: [PATCH 066/341] CP-47869: Modified code using 2to3 ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate Signed-off-by: Ashwinh --- .../examples/datapath/block/Datapath.deactivate | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate index 48240856deb..1585a267eb0 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate +++ 
b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.deactivate @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") @@ -17,10 +17,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.deactivate(request) - print json.dumps(results) + print(json.dumps(results)) From b734b7267d45fe0329aca1a77c2962c1f226a1fa Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Fri, 24 May 2024 13:05:02 +0000 Subject: [PATCH 067/341] CP-47869: Modified code using 2to3 /ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach Signed-off-by: Ashwinh --- .../examples/datapath/block/Datapath.attach | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach index db6eb6de2eb..6a2ec399460 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach +++ b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.attach @@ -1,15 +1,15 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") import xapi, d -import argparse, json, urlparse +import argparse, json, urllib.parse class Implementation(d.Datapath_skeleton): def attach(self, dbg, uri, domain): - u = urlparse.urlparse(uri) + u = urllib.parse.urlparse(uri) return { 'implementations': [ ['XenDisk', {"backend_type":"vbd", "extra":{}, "params":u.path}], ['BlockDevice', {"path":u.path}] ] } @@ -20,10 +20,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) 
dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.attach(request) - print json.dumps(results) + print(json.dumps(results)) From ddcda0b14b22c215741ef2333dc823273e5cee14 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Fri, 24 May 2024 13:08:38 +0000 Subject: [PATCH 068/341] CP-47869: Modified code using 2to3 /ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach Signed-off-by: Ashwinh --- .../examples/datapath/block/Datapath.detach | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach index aac2e9d3773..5e42f252943 100755 --- a/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach +++ b/ocaml/xapi-storage-script/examples/datapath/block/Datapath.detach @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import sys sys.path.append("/home/vagrant/djs55/dbus-test/python") @@ -17,10 +17,10 @@ if __name__ == "__main__": args = vars(parser.parse_args()) if not(args['json']): - print "Not implemented" + print("Not implemented") sys.exit(1) dispatcher = d.Datapath_server_dispatcher(Implementation()) - request = json.loads(sys.stdin.readline(),) + request = json.loads(sys.stdin.readline()) results = dispatcher.detach(request) - print json.dumps(results) + print(json.dumps(results)) From b4c5764f78ab40162ee8c4478ce22e3e94904564 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 08:46:26 +0000 Subject: [PATCH 069/341] CP-47869: Modified code using 2to3 /ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py Signed-off-by: Ashwinh --- .../examples/datapath/loop+blkback/datapath.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py 
b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py index ed65d595477..f076b700a6f 100755 --- a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py +++ b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published @@ -15,10 +15,10 @@ # along with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -from __future__ import print_function + import os import sys -import urlparse +import urllib.parse import xapi.storage.api.v5.datapath from xapi.storage.common import call @@ -64,8 +64,8 @@ def activate(self, dbg, uri, domain): pass def attach(self, dbg, uri, domain): - parsed_url = urlparse.urlparse(uri) - query = urlparse.parse_qs(parsed_url.query) + parsed_url = urllib.parse.urlparse(uri) + query = urllib.parse.parse_qs(parsed_url.query) file_path = os.path.realpath(parsed_url.path) @@ -97,7 +97,7 @@ def deactivate(self, dbg, uri, domain): pass def detach(self, dbg, uri, domain): - parsed_url = urlparse.urlparse(uri) + parsed_url = urllib.parse.urlparse(uri) file_path = os.path.realpath(parsed_url.path) From 051f3e3cd3a86b5dc65bc8f03c6b828089659368 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 08:51:48 +0000 Subject: [PATCH 070/341] CP-47869: Update code to be compatible with python3 /ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py Signed-off-by: Ashwinh --- .../python/examples/datapath/loop+blkback/plugin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py index 
e16a53794a7..4cbc9939fbd 100755 --- a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py +++ b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/plugin.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group,Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published From 93f5fee835966f25e7339b0b82750b5879d8b2e7 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 08:56:57 +0000 Subject: [PATCH 071/341] CP-47869: Update code to be compatible with python3 ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py - Changed shebhang and copyright in /ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py Signed-off-by: Ashwinh --- .../org.xen.xapi.storage.simple-file/plugin.py | 4 ++-- .../org.xen.xapi.storage.simple-file/sr.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py index 61a41db978f..583043015ed 100755 --- a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py +++ b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/plugin.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group, Inc. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published diff --git a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py index 35e96b6ab83..8f2f5ca3942 100755 --- a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py +++ b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published @@ -18,8 +18,8 @@ from __future__ import print_function import os import sys -import urllib -import urlparse +import urllib.request, urllib.parse, urllib.error +import urllib.parse import xapi.storage.api.v5.volume from xapi import InternalError @@ -66,12 +66,12 @@ def attach(self, dbg, configuration): # As a simple "stateless" implementation, encode all the # configuration into the URI returned. This is passed back # into volume interface APIs and the stat and ls operations. - return urlparse.urlunparse(( + return urllib.parse.urlunparse(( 'file', '', configuration['path'], '', - urllib.urlencode(configuration, True), + urllib.parse.urlencode(configuration, True), None)) def detach(self, dbg, sr): @@ -96,8 +96,8 @@ def stat(self, dbg, sr): [stat sr] returns summary metadata associated with [sr]. Note this call does not return details of sub-volumes, see SR.ls. 
""" - parsed_url = urlparse.urlparse(sr) - config = urlparse.parse_qs(parsed_url.query) + parsed_url = urllib.parse.urlparse(sr) + config = urllib.parse.parse_qs(parsed_url.query) description = (config['description'][0] if 'description' in config From 647dc6a6eb66edff1acc96e7b22f2bf25720f4a4 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:18:29 +0000 Subject: [PATCH 072/341] CP-47869: Removed looper2.py from ocaml/tests/tests/ Signed-off-by: Ashwinh --- ocaml/tests/tests/looper2.py | 52 ------------------------------------ 1 file changed, 52 deletions(-) delete mode 100755 ocaml/tests/tests/looper2.py diff --git a/ocaml/tests/tests/looper2.py b/ocaml/tests/tests/looper2.py deleted file mode 100755 index 3e3395653ac..00000000000 --- a/ocaml/tests/tests/looper2.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/python - -print "Program attempts to log into an XAPI server to fetch a list of VMs and" -print "a list of debug objects. It then chooses the first debug object, " -print "queries the int->float map and then calls the 'recycle' message using" -print "that map as an argument" -print - -import getopt, sys, xapi - -url = "http://localhost:8086" #default -parsed = getopt.getopt(sys.argv[1:], "u:url") -if len(parsed[0]) == 1: - url = parsed[0][0][1] - -print "Connecting to server on URL: ", url -print "(change with -u argument)" - -# Create an object to represent our server. -server = xapi.Server(url); - -# Call the server and get our result. -print "Logging in... 
", -session = server.Session.login_with_password("user", "passwd") -print "OK" -print "Session ID: \""+session+"\"" -vm_list = server.VM.get_all(session) - -print "VM list = " + repr(vm_list) - -for vm in vm_list: - print "VM ", vm, " in state: ", server.VM.get_power_state(session, vm) - -first_vm = vm_list[0] - -debug_objs = server.Debug.get_all(session) -debug = debug_objs[0] -ifm = server.Debug.get_int_float_map(session, debug) -print "Got an int->float map: " + repr(ifm) - -print "doing the int_float_map recycle thing" - -attempt = 0 -while 1: - this = server.Debug.recycle_int_float_map(ifm) - if ifm <> this: - print "Got a different response!" - print "this = ", repr(this) - print "ifm = ", repr(ifm) - raise "Failed" - attempt = attempt + 1 - print attempt From 5a3a533f898eeb359b063a3a238e12903748f0b3 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:20:20 +0000 Subject: [PATCH 073/341] CP-47869: Removed looper.py from ocaml/tests/tests/ Signed-off-by: Ashwinh --- ocaml/tests/tests/looper.py | 44 ------------------------------------- 1 file changed, 44 deletions(-) delete mode 100755 ocaml/tests/tests/looper.py diff --git a/ocaml/tests/tests/looper.py b/ocaml/tests/tests/looper.py deleted file mode 100755 index 8977fc6efec..00000000000 --- a/ocaml/tests/tests/looper.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python - -print "Program attempts to log into an XAPI server, fetch a list of VMs and" -print "then calls VM.get_otherConfig on the first one in a loop" -print -import getopt, sys, xapi - -url = "http://localhost:8086" #default -parsed = getopt.getopt(sys.argv[1:], "u:url") -if len(parsed[0]) == 1: - url = parsed[0][0][1] -print "Connecting to server on URL: ", url -print "(change with -u argument)" - -# Create an object to represent our server. -server = xapi.Server(url); - -# Call the server and get our result. -print "Logging in... 
", -session = server.Session.login_with_password("user", "passwd", "1.0", "xen-api-tests-looper") -print "OK" -print "Session ID: \""+session+"\"" -vm_list = server.VM.get_all(session) - -print "VM list = " + repr(vm_list) - -for vm in vm_list: - print "VM ", vm, " in state: ", server.VM.get_power_state(session, vm) - -first_vm = vm_list[0] - -print "Getting the otherConfig of " + first_vm - -attempt = 0 -last = server.VM.get_otherConfig(session, first_vm) -while 1: - this = server.VM.get_otherConfig(session, first_vm) - if last <> this: - print "Got a different response!" - print "this = ", repr(this) - print "last = ", repr(last) - raise "Failed" - attempt = attempt + 1 - print attempt From 88f5fd8306b16efb96e757b11b78e61797550864 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 09:08:03 +0000 Subject: [PATCH 074/341] CP-47869: Modified code using 2to3 /ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py Signed-off-by: Ashwinh --- .../volume.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py index d97ceb4ab5d..6593a8fd536 100755 --- a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py +++ b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/volume.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -# Copyright (C) Citrix Systems Inc. +# Copyright (C) Cloud Software Group, Inc. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published @@ -21,8 +21,9 @@ import os import sys import uuid -import urllib -import urlparse +import urllib.request +import urllib.parse +import urllib.error import xapi.storage.api.v5.volume from xapi.storage import log @@ -31,8 +32,8 @@ class Implementation(xapi.storage.api.v5.volume.Volume_skeleton): def parse_sr(self, sr_uri): - parsed_url = urlparse.urlparse(sr_uri) - config = urlparse.parse_qs(parsed_url.query) + parsed_url = urllib.parse.urlparse(sr_uri) + config = urllib.parse.parse_qs(parsed_url.query) return parsed_url, config def create_volume_data(self, name, description, size, uris, uuid): @@ -50,8 +51,8 @@ def create_volume_data(self, name, description, size, uris, uuid): } def volume_uris(self, sr_path, name, size): - query = urllib.urlencode({'size': size}, True) - return [urlparse.urlunparse( + query = urllib.parse.urlencode({'size': size}, True) + return [urllib.parse.urlunparse( ('loop+blkback', None, os.path.join(sr_path, name), None, query, None))] @@ -187,7 +188,7 @@ def ls(self, dbg, sr): """ [ls sr] lists the volumes from [sr] """ - parsed_url = urlparse.urlparse(sr) + parsed_url = urllib.parse.urlparse(sr) sr_path = parsed_url.path files = glob.glob(os.path.join(sr_path, '*.inf')) log.debug('files to list {}'.format(files)) From 655ca9a0cd94e89953be922709da322362447032 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 09:12:27 +0000 Subject: [PATCH 075/341] CP-47869: Changed shebang to python3 /ocaml/xapi-storage/python/xapi/storage/__init__.py Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/xapi/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi-storage/python/xapi/storage/__init__.py b/ocaml/xapi-storage/python/xapi/storage/__init__.py index 18ff5363796..e5a0d9b4834 100644 --- a/ocaml/xapi-storage/python/xapi/storage/__init__.py +++ 
b/ocaml/xapi-storage/python/xapi/storage/__init__.py @@ -1 +1 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 From 981f142bebb5e08158c3931cb7dd5be67989b91f Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 09:13:28 +0000 Subject: [PATCH 076/341] CP-47869: Changed shebang to python3 /ocaml/xapi-storage/python/xapi/storage/common.py Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/xapi/storage/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi-storage/python/xapi/storage/common.py b/ocaml/xapi-storage/python/xapi/storage/common.py index a311446a416..e8d34869277 100644 --- a/ocaml/xapi-storage/python/xapi/storage/common.py +++ b/ocaml/xapi-storage/python/xapi/storage/common.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 from xapi.storage import log import xapi From 26acd4817e6fc41b6c2af87a7decf3efa4751b7d Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 09:16:55 +0000 Subject: [PATCH 077/341] CP-47869: Changed shebang to python3 /ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py b/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py index 18ff5363796..e5a0d9b4834 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/v5/__init__.py @@ -1 +1 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 From 333bec8a575271088fa94c532b7640561552b131 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 09:42:19 +0000 Subject: [PATCH 078/341] CP-47869: Replaced <> with != to support python3 /scripts/examples/python/provision.py Signed-off-by: Ashwinh --- scripts/examples/python/provision.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/examples/python/provision.py 
b/scripts/examples/python/provision.py index 4c5ab11daef..3b8a224ffae 100644 --- a/scripts/examples/python/provision.py +++ b/scripts/examples/python/provision.py @@ -107,5 +107,5 @@ def setProvisionSpec(session, vm, ps): txt2 = printProvisionSpec(ps) print(txt2) if txt != txt2: - raise AssertionError("Sanity-check failed: print(parse(print(x))) <> print(x)") + raise AssertionError("Sanity-check failed: print(parse(print(x))) != print(x)") print("* OK: print(parse(print(x))) == print(x)") From d73690d63928748de4e54cb1c494d289d3ec1de9 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:38:03 +0000 Subject: [PATCH 079/341] CP-47869: Modified code using 2to3 /ocaml/xapi-storage/python/xapi/__init__.py Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/xapi/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index d2a0eed3f94..1960e549d46 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -1,7 +1,7 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ -Copyright (c) 2013-2018, Citrix Inc. +Copyright (c) 2013-2024, Cloud Software Group,Inc. All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -72,7 +72,7 @@ class XenAPIException(Exception): def __init__(self, code, params): Exception.__init__(self) - if not isinstance(code, str) and not isinstance(code, unicode): + if not isinstance(code, str) and not isinstance(code, str): raise TypeError("string", repr(code)) if not isinstance(params, list): raise TypeError("list", repr(params)) @@ -139,7 +139,7 @@ def __init__(self, name): def is_long(x): try: - long(x) + int(x) return True except ValueError: return False From 2954895720742164af3a2984ca929cf85f76aef7 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:39:58 +0000 Subject: [PATCH 080/341] CP-47869: Changed shebang to python3 ocaml/xapi-storage/python/xapi/storage/api/__init__.py Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/xapi/storage/api/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/__init__.py b/ocaml/xapi-storage/python/xapi/storage/api/__init__.py index 18ff5363796..e5a0d9b4834 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/__init__.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/__init__.py @@ -1 +1 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 From 756b9f25c5e19231c1731212186dce2080c060f1 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:43:02 +0000 Subject: [PATCH 081/341] CP-47869: Modified to python3 /ocaml/xapi-storage/python/Makefile Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi-storage/python/Makefile b/ocaml/xapi-storage/python/Makefile index bc8eff9b851..a2ccad97c8c 100644 --- a/ocaml/xapi-storage/python/Makefile +++ b/ocaml/xapi-storage/python/Makefile @@ -1,5 +1,5 @@ PREFIX?=/usr -PYTHON?=python2 +PYTHON?=python3 .PHONY: build release clean install uninstall From 99293187a4a1bcd3a29dc0842a78c171d4d20bb6 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: 
Tue, 28 May 2024 09:38:39 +0000 Subject: [PATCH 082/341] CP-47869: Updated to python3 .github/workflows/setup-xapi-environment/action.yml Signed-off-by: Ashwinh --- .github/workflows/setup-xapi-environment/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/setup-xapi-environment/action.yml b/.github/workflows/setup-xapi-environment/action.yml index d46ae3a5b96..e32110ad977 100644 --- a/.github/workflows/setup-xapi-environment/action.yml +++ b/.github/workflows/setup-xapi-environment/action.yml @@ -27,9 +27,9 @@ runs: shell: bash run: sudo apt-get update - - name: Install python2 + - name: Install python3 shell: bash - run: sudo apt-get install python2 + run: sudo apt-get install python3 - name: Use disk with more space for TMPDIR and XDG_CACHE_HOME shell: bash From 5d0a5a687ace12454712fced475ec0e3f362eabf Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 3 Jun 2024 09:25:04 +0000 Subject: [PATCH 083/341] CP-47869: Fixed No attribute 'server' on module 'xmlrpc.client' [module-attr] in /scripts/examples/python/mini-xenrt.py Signed-off-by: Ashwinh --- scripts/examples/python/mini-xenrt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/examples/python/mini-xenrt.py b/scripts/examples/python/mini-xenrt.py index 0907132da80..b30e9d9973c 100644 --- a/scripts/examples/python/mini-xenrt.py +++ b/scripts/examples/python/mini-xenrt.py @@ -109,7 +109,7 @@ def make_operation_list(vm): print(" -- performs parallel operations on VMs with the specified other-config key") sys.exit(1) - x = xmlrpc.client.server(sys.argv[1]) + x = xmlrpc.client.ServerProxy(sys.argv[1]) key = sys.argv[2] session = x.session.login_with_password("root", "xenroot", "1.0", "xen-api-scripts-minixenrt.py")["Value"] vms = x.VM.get_all_records(session)["Value"] From e061a29549144f8fab3adf6b4170df6b0918590b Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 3 Jun 2024 11:18:22 +0000 Subject: [PATCH 084/341] CP-47869: fixed Name 
'FilenotFoundError' is not defined [name-error] in scripts/examples/smapiv2.py Signed-off-by: Ashwinh --- scripts/examples/smapiv2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/examples/smapiv2.py b/scripts/examples/smapiv2.py index cc990dcadf2..1047f57825c 100644 --- a/scripts/examples/smapiv2.py +++ b/scripts/examples/smapiv2.py @@ -13,7 +13,7 @@ def reopenlog(log_file): if log_file: try: log_f = open(log_file, "a") - except FilenotFoundError: + except FileNotFoundError: log_f = open(log_file, "w") else: log_f = open(os.dup(sys.stdout.fileno()), "a") From 43d9130ed4daca656781f128e64b73ae9525863c Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 3 Jun 2024 11:40:43 +0000 Subject: [PATCH 085/341] CP-47869: removed smapiv2.py and mini-xenrt.py from expected_to_fail in pyproject.toml Signed-off-by: Ashwinh --- pyproject.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5ea22b96551..2fb086f0b11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -251,11 +251,9 @@ expected_to_fail = [ # SSLSocket.send() only accepts bytes, not unicode string as argument: "scripts/examples/python/exportimport.py", # Other fixes needed: - "scripts/examples/python/mini-xenrt.py", "scripts/examples/python/XenAPI/XenAPI.py", "scripts/examples/python/monitor-unwanted-domains.py", "scripts/examples/python/shell.py", - "scripts/examples/smapiv2.py", "scripts/static-vdis", "scripts/plugins/extauth-hook-AD.py", ] From 0e72cb68f537c6947076b0b68dffae202b1ff561 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Fri, 24 May 2024 12:00:00 +0200 Subject: [PATCH 086/341] CI/coverage: Fix addopts, migrate away from pytest-cov pyproject.toml: - Don't use [tool.pytest.ini_options].addopts to pass test paths: addopts forces those options to be used every time pytest is run, which is very restrictive. 
Instead, use `coverage run` to configure coverage options, and support running specific tests by passing them as arguments to pytest: coverage run -m pytest python3/tests/test_xenapi.py .pre-commit-config.yaml: - No longer rely on [tool.pytest.ini_options].addopts to use pytest-cov. Instead, use `coverage run` to run pytest with coverage, and then `coverage xml` to get an xml coverage dump of the coverage, as well as `coverage html` to generate html for viewing coverage locally, and `coverage report` to generate a brief textual report of the coverage. This also improves the configuration to show the coverage report after the test results have been shown and not in the middle of it. Signed-off-by: Bernhard Kaindl --- .pre-commit-config.yaml | 3 ++- pyproject.toml | 31 ++++++++++++++++++------------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 668b4190ce1..d714b01cd6e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,7 +65,8 @@ repos: - id: pytest files: python3/ name: check that the Python3 test suite in passes - entry: env PYTHONDEVMODE=yes sh -c 'python3 -m pytest -vv && + entry: env PYTHONDEVMODE=yes sh -c 'coverage run && coverage xml && + coverage html && coverage report && diff-cover --ignore-whitespace --compare-branch=origin/feature/py3 --show-uncovered --html-report .git/coverage-diff.html --fail-under 50 .git/coverage3.11.xml' diff --git a/pyproject.toml b/pyproject.toml index 5ea22b96551..abcdd512aab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,7 +52,8 @@ skip_covered = true [tool.coverage.run] # Default command line for "coverage run": Run pytest in non-verbose mode -command_line = "-m pytest -p no:logging -p no:warnings" +command_line = "-m pytest -v -ra" + # Default data file for "coverage run": Store coverage data in .git/.coverage data_file = ".git/.coverage" # Default context for "coverage run": Use the name of the test function @@ -75,7 
+76,7 @@ relative_files = true # Default output when writing "coveragle xml" data. This needs to match what # diff-cover and coverage upload to Codecov expect [tool.coverage.xml] -output = ".git/coverage.xml" +output = ".git/coverage3.11.xml" # Default output directory for writing "coverage html" data. @@ -199,18 +200,22 @@ exclude = [ # ----------------------------------------------------------------------------- # Options to enable for pytest by default: # -v show what happens -# -rA show summary after running tests -# --cov=python3 measure coverage of the python3 directory -# --cov-fail-under minimum coverage percentage -# --cov-report=term-missing show missing lines in the coverage report -# --cov-report=html: generate an HTML coverage report(for viewing) -# --cov-report=xml: generate an XML coverage report(for upload) +# -ra show short summary after running tests +# Other options should not be passed using addopts, as addopts forces those +# options to be used every time pytest is run, which is very restrictive. +# Instead, use `coverage run` to configure coverage options, and support +# running specific tests by passing them as arguments to pytest: +# For example: +# coverage run -m pytest python3/tests/test_xenapi.py +# Adding specific --cov options using addopts is not recommended as it would +# require to use the pytest-cov plugin, which would conflict with the use of +# `coverage run`. Instead, use `coverage` to configure coverage options. 
+# Specifying directories to test is better done using the testpaths option, +# as testpaths sets the default directories to search for tests, but does not +# force them to be run, so you can still run specific tests files by just +# passing them as arguments to pytest: pytest python3/tests/test_xenapi.py # ----------------------------------------------------------------------------- -addopts = """ --v -rA --cov=python3 --cov=scripts --cov-fail-under=50 ---cov-report=html:.git/coverage --cov-report=term-missing ---cov-report=xml:.git/coverage3.11.xml -""" +addopts = "-v -ra" # ----------------------------------------------------------------------------- # Other pytest config options: From ed86b8f1d3234cf4e42a0fff8a1f9a62b1f0855a Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 11 Jun 2024 08:14:39 +0000 Subject: [PATCH 087/341] Revert "CP-47869: Modified code using 2to3 /ocaml/xapi-storage/python/xapi/__init__.py" This reverts commit d73690d63928748de4e54cb1c494d289d3ec1de9. Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/xapi/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index 1960e549d46..d2a0eed3f94 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -1,7 +1,7 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python2 """ -Copyright (c) 2013-2024, Cloud Software Group,Inc. +Copyright (c) 2013-2018, Citrix Inc. All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -72,7 +72,7 @@ class XenAPIException(Exception): def __init__(self, code, params): Exception.__init__(self) - if not isinstance(code, str) and not isinstance(code, str): + if not isinstance(code, str) and not isinstance(code, unicode): raise TypeError("string", repr(code)) if not isinstance(params, list): raise TypeError("list", repr(params)) @@ -139,7 +139,7 @@ def __init__(self, name): def is_long(x): try: - int(x) + long(x) return True except ValueError: return False From 789b9e490022489eb83ee1095b4df15d882fe3e0 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 11 Jun 2024 08:21:19 +0000 Subject: [PATCH 088/341] CP-47869: Changed shebang to python3 /ocaml/xapi-storage/python/xapi/__init__.py Signed-off-by: Ashwinh --- ocaml/xapi-storage/python/xapi/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index d2a0eed3f94..57a7c0c9f2d 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -1,7 +1,7 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ -Copyright (c) 2013-2018, Citrix Inc. +Copyright (c) 2013-2024, Cloud Software Group,Inc. All rights reserved. 
Redistribution and use in source and binary forms, with or without From 23ed63da3766b4a0372ce34787d4189d5e23165b Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 11 Jun 2024 08:58:15 +0000 Subject: [PATCH 089/341] CP-47869: Fix pylint reimport issue ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py Signed-off-by: Ashwinh --- .../examples/volume/org.xen.xapi.storage.simple-file/sr.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py index 8f2f5ca3942..07f4f9c0436 100755 --- a/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py +++ b/ocaml/xapi-storage/python/examples/volume/org.xen.xapi.storage.simple-file/sr.py @@ -18,8 +18,9 @@ from __future__ import print_function import os import sys -import urllib.request, urllib.parse, urllib.error +import urllib.request import urllib.parse +import urllib.error import xapi.storage.api.v5.volume from xapi import InternalError From 4e4b35f9db4091a74a0ae9b7bb6e49cd8e8ef79e Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:25:28 +0000 Subject: [PATCH 090/341] CP-47869: Deleted message_switch.py ocaml/message-switch/python/message_switch.py Signed-off-by: Ashwinh --- ocaml/message-switch/python/message_switch.py | 414 ------------------ 1 file changed, 414 deletions(-) delete mode 100755 ocaml/message-switch/python/message_switch.py diff --git a/ocaml/message-switch/python/message_switch.py b/ocaml/message-switch/python/message_switch.py deleted file mode 100755 index 460d4ee2e04..00000000000 --- a/ocaml/message-switch/python/message_switch.py +++ /dev/null @@ -1,414 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2012 Citrix Systems Inc -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that 
the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import json - -class Http_request: - def __init__(self, method, uri, body = None): - self.method = method - self.uri = uri - self.body = body - - def to_string(self): - body = "" - if self.body: - body = self.body - lines = [ - "%s %s HTTP/1.1" % (self.method, self.uri), - "Content-Length: %d" % len(body), - "", - body - ] - return "\r\n".join(lines) - -class Http_response: - def __init__(self, body): - self.body = body - - def to_string(self): - lines = [ - "HTTP/1.1 200 OK", - "Content-Length: %d" % len(self.body), - "", - self.body - ] - return "\r\n".join(lines) - - @classmethod - def of_string(cls, txt): - lines = txt.split("\r\n") - if lines[0] <> "HTTP/1.1 200 OK": - raise "Unexpected status line: %s" % lines[0] - rest = "\r\n".join(lines[3:]) - return cls(rest) - -class Message: - def __init__(self, payload, correlation_id, reply_to = None): - self.payload = payload - self.correlation_id = correlation_id - self.reply_to = reply_to - - def save(self): - result = { - "payload": self.payload, - "correlation_id": self.correlation_id - } - if self.reply_to: - result["reply_to"] = self.reply_to - return result - - @classmethod - def load(cls, x): - payload = x["payload"] - correlation_id = x["correlation_id"] - reply_to = None - if "reply_to" in x: - reply_to = x["reply_to"] - return cls(payload, correlation_id, reply_to) - - def __str__(self): - return json.dumps(self.save()) - 
-class Login: - def __init__(self, some_credential): - self.some_credential = some_credential - - def to_request(self): - return Http_request("GET", "/login/%s" % self.some_credential) - -class Create_request: - def __init__(self, name = None): - self.name = name - - def to_request(self): - uri = "/create" - if self.name: - uri = uri + "/" + self.name - return Http_request("GET", uri) - -class Create_response: - def __init__(self, name = None): - self.name = name - - @classmethod - def of_response(cls, response): - return cls(response.body) - - def to_response(self): - return Http_response(self.name) - -class Subscribe: - def __init__(self, name): - self.name = name - - def to_request(self): - return Http_request("GET", "/subscribe/%s" % self.name) - -class Send: - def __init__(self, name, message): - self.name = name - self.message = message - def to_request(self): - if self.message.reply_to: - return Http_request("POST", "/send/%s/%d/%s" % (self.name, self.message.correlation_id, self.message.reply_to), self.message.payload) - else: - return Http_request("POST", "/send/%s/%d" % (self.name, self.message.correlation_id), self.message.payload) - -class Transfer_request: - def __init__(self, ack_to, timeout): - self.ack_to = ack_to - self.timeout = timeout - - def to_request(self): - return Http_request("GET", "/transfer/%Ld/%.16g" % (self.ack_to, self.timeout)) - -class Transfer_response: - def __init__(self, messages): - self.messages = messages - - @classmethod - def of_response(cls, response): - x = json.loads(response.body) - result = {} - for (k, v) in x["messages"]: - result[long(k)] = Message.load(v) - return Transfer_response(result) - -class Ack: - def __init__(self, ack): - self.ack = ack - - def to_request(self): - return Http_request("GET", "/ack/%Ld" % self.ack) - -import string, socket - -default_config = { - "ip": "169.254.0.1", # HIMN IP of dom0 - "port": 8080, # default for xenswitch -} - -class End_of_file(Exception): - def __init__(self): - pass 
-class Bad_status(Exception): - def __init__(self, status): - self.status = status -class Missing_content_length(Exception): - def __init__(self): - pass -class StreamReader: - def __init__(self, sock): - self.sock = sock - self.buffered = "" - def read_fragment(self, n): - if len(self.buffered) > 0: - num_available = min(n, len(self.buffered)) - fragment = self.buffered[0:num_available] - self.buffered = self.buffered[num_available:] - return fragment - else: - self.buffered = self.sock.recv(16384) - if len(self.buffered) == 0: - raise End_of_file() - return self.read_fragment(n) - def read(self, n): - results = "" - while n > 0: - fragment = self.read_fragment(n) - n = n - len(fragment) - results = results + fragment - return results - - def readline(self): - results = "" - eol = False - while not eol: - byte = self.read(1) - if byte == "\n": - eol = True - else: - results = results + byte - return results - -def link_send(sock, m): - sock.sendall(m.to_request().to_string()) - -def link_recv(reader): - status = reader.readline() - if not(status.startswith("HTTP/1.1 200 OK")): - raise Bad_status(status) - content_length = None - eoh = False - while not eoh: - header = reader.readline().strip() - if header == "": - eoh = True - else: - bits = header.split(":") - key = string.lower(bits[0]) - if key == "content-length": - content_length = int(bits[1]) - if content_length == None: - raise Missing_content_length() - body = reader.read(content_length) - return Http_response(body) - -def login(sock, reader, some_credential): - link_send(sock, Login(some_credential)) - link_recv(reader) - -def create(sock, reader, name = None): - link_send(sock, Create_request(name)) - return Create_response.of_response(link_recv(reader)).name - -def subscribe(sock, reader, name): - link_send(sock, Subscribe(name)) - link_recv(reader) - -def send(sock, reader, name, msg): - link_send(sock, Send(name, msg)) - link_recv(reader) - -def transfer(sock, reader, ack_to, timeout): - 
link_send(sock, Transfer_request(ack_to, timeout)) - return Transfer_response.of_response(link_recv(reader)).messages - -def ack(sock, reader, id): - link_send(sock, Ack(id)) - link_recv(reader) - -from threading import Thread, Event, Lock - -class Receiver(Thread): - def __init__(self, sock, reader, server): - Thread.__init__(self) - self.daemon = True - self.sock = sock - self.reader = reader - self.server = server - self.events = {} - self.replies = {} - def register_correlation_id(self, correlation_id): - event = Event() - self.events[correlation_id] = event - return event - def get_reply(self, correlation_id): - reply = self.replies[correlation_id] - del self.replies[correlation_id] - return reply - def set_listen_callback(self, listen_callback): - self.listen_callback = listen_callback - def run(self): - ack_to = -1L - timeout = 5.0 - while True: - messages = transfer(self.sock, self.reader, ack_to, timeout) - for id in messages.keys(): - ack_to = max(ack_to, id) - m = messages[id] - reply_to = m.reply_to - if reply_to: - reply = self.server.dispatch(m) - send(self.sock, self.reader, reply_to, reply) - ack(self.sock, self.reader, id) - else: - if m.correlation_id not in self.events: - print >>sys.stderr, "Unknown correlation_id: %d" % m.correlation_id - else: - self.replies[m.correlation_id] = m.payload - event = self.events[m.correlation_id] - del self.events[m.correlation_id] - event.set() - -class Connection: - def __init__(self, client, name): - self.client = client - self.name = name - def rpc(self, request): - return self.client.rpc(self.name, request) - -class Server: - def __init__(self): - pass - def dispatch(self, request): - # echo the request back - request.reply_to = None - return request - -class Switch: - def __init__(self, some_credential, config = default_config, server = Server()): - self.some_credential = some_credential - self.config = config - self.server = server - - # Open a connection for requests and one for events - self.request_sock 
= socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.request_sock.connect((config["ip"], config["port"])) - self.request_stream_reader = StreamReader(self.request_sock) - self.request_mutex = Lock() - login(self.request_sock, self.request_stream_reader, some_credential) - - self.event_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.event_sock.connect((config["ip"], config["port"])) - self.event_stream_reader = StreamReader(self.event_sock) - login(self.event_sock, self.event_stream_reader, some_credential) - - self.receiver_thread = Receiver(self.event_sock, self.event_stream_reader, self.server) - self.receiver_thread.start() - self.next_correlation_id = 0 - self.next_correlation_id_mutex = Lock() - - def correlation_id(self): - self.next_correlation_id_mutex.acquire() - try: - correlation_id = self.next_correlation_id - self.next_correlation_id = self.next_correlation_id + 1 - return correlation_id - finally: - self.next_correlation_id_mutex.release() - - def rpc(self, name, request): - correlation_id = self.correlation_id() - event = self.receiver_thread.register_correlation_id(correlation_id) - - self.request_mutex.acquire() - try: - reply_queue = create(self.request_sock, self.request_stream_reader) - subscribe(self.request_sock, self.request_stream_reader, reply_queue) - send(self.request_sock, self.request_stream_reader, name, Message(request, correlation_id, reply_queue)) - finally: - self.request_mutex.release() - - event.wait() - return self.receiver_thread.get_reply(correlation_id) - - def connect(self, service): - self.request_mutex.acquire() - try: - create(self.request_sock, self.request_stream_reader, service) - finally: - self.request_mutex.release() - - return Connection(self, service) - - def listen(self, service): - self.request_mutex.acquire() - try: - create(self.request_sock, self.request_stream_reader, service) - subscribe(self.request_sock, self.request_stream_reader, service) - finally: - self.request_mutex.release() - 
- -if __name__ == "__main__": - from optparse import OptionParser - import sys, time - - parser = OptionParser() - parser.add_option("-x", "--switch", dest="switch", type="string", - help="address of message switch", metavar="SWITCH") - parser.add_option("-l", "--listen", dest="listen", action="store_true", - help="listen for RPCs, instead of sending them") - parser.add_option("-s", "--service", dest="service", type="string", - help="name of the remote service") - parser.add_option("-c", "--client", dest="client_name", type="string", - help="name which identifies this client") - - (options, args) = parser.parse_args() - config = default_config - if options.switch: - bits = options.switch.split(":") - config["ip"] = bits[0] - if len(bits) == 2: - config["port"] = int(bits[1]) - - client_name = "test_python" - if options.client_name: - client_name = options.client_name - if not options.service: - print >> sys.stderr, "Must provide a --service name" - sys.exit(1) - - if options.listen: - s = Switch(client_name, server = Server()) - s.listen(options.service) - while True: - time.sleep(5) - else: - s = Switch(client_name) - c = s.connect(options.service) - print c.rpc("hello") From 9c066f4680986b71f9eee456a49f84550bdc81a4 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:29:16 +0000 Subject: [PATCH 091/341] CP-47869: Removed message_switch_test.py ocaml/message-switch/core_test Signed-off-by: Ashwinh --- .../core_test/message_switch_test.py | 98 ------------------- 1 file changed, 98 deletions(-) delete mode 100644 ocaml/message-switch/core_test/message_switch_test.py diff --git a/ocaml/message-switch/core_test/message_switch_test.py b/ocaml/message-switch/core_test/message_switch_test.py deleted file mode 100644 index 5566adf8a08..00000000000 --- a/ocaml/message-switch/core_test/message_switch_test.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2012 Citrix Systems Inc -# -# Permission to use, copy, modify, and distribute this 
software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import unittest, os -from message_switch import * - -try: - tmpdir = os.environ["TMPDIR"] -except KeyError: - tmpdir = "/tmp" - -basedir = os.path.join(tmpdir, "link_test") - -rpc_req = Message("hello", 1L, "reply_to") -rpc_res = Message("hello", 1L) - -class Internal_invariants(unittest.TestCase): - def test_Message_save_load(self): - for m in [rpc_req, rpc_res]: - n = Message.load(m.save()) - assert m.payload == n.payload - assert m.correlation_id == n.correlation_id - assert m.reply_to == n.reply_to - -def load(x): - path = os.path.join(basedir, x) - f = open(path, "r") - try: - return f.read() - finally: - f.close() - -class Ocaml_interop(unittest.TestCase): - def test_login(self): - py = Login("hello").to_request().to_string() - ocaml = load("login") - assert py == ocaml - def test_create_named(self): - py = Create_request("service").to_request().to_string() - ocaml = load("create") - assert py == ocaml - def test_create_anon(self): - py = Create_request().to_request().to_string() - ocaml = load("create.anon") - assert py == ocaml - def test_subscribe(self): - py = Subscribe("service").to_request().to_string() - ocaml = load("subscribe") - assert py == ocaml - def test_request(self): - py = Send("service", rpc_req).to_request().to_string() - ocaml = load("request") - assert py == ocaml - def 
test_response(self): - py = Send("service", rpc_res).to_request().to_string() - ocaml = load("reply") - assert py == ocaml - def test_transfer(self): - py = Transfer_request(3, 5.0).to_request().to_string() - ocaml = load("transfer") - assert py == ocaml - def test_ack(self): - py = Ack(3).to_request().to_string() - ocaml = load("ack") - assert py == ocaml - - def test_create_reply(self): - ocaml = Create_response.of_response(Http_response.of_string(load("create.reply"))) - assert ocaml.name == "service" - def test_transfer_reply(self): - ocaml = Transfer_response.of_response(Http_response.of_string(load("transfer.reply"))) - m = { - 1L: rpc_req, - 2L: rpc_res, - } - py = Transfer_response(m) - for k in py.messages: - assert k in ocaml.messages - assert str(py.messages[k]) == str(ocaml.messages[k]) - for k in ocaml.messages: - assert k in py.messages - assert str(py.messages[k]) == str(ocaml.messages[k]) - -if __name__ == "__main__": - unittest.main() From 5ab6877ae9d2f01a13e3b08f4df062e17d03fd9d Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:31:47 +0000 Subject: [PATCH 092/341] CP-47869: Removed rrdd-example.py ocaml/xcp-rrdd/scripts/rrdd/ Signed-off-by: Ashwinh --- ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100755 ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py diff --git a/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py b/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py deleted file mode 100755 index e25e0ddf016..00000000000 --- a/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -import rrdd, os - -if __name__ == "__main__": - # Create a proxy for communicating with xcp-rrdd. - api = rrdd.API(plugin_id="host_mem") - while True: - # Wait until 0.5 seconds before xcp-rrdd is going to read the output file. - api.wait_until_next_reading(neg_shift=.5) - # Collect measurements. 
- cmd = "free -k | grep Mem | awk '{print $2, $3, $4}'" - vs = os.popen(cmd).read().strip().split() - # Tell the proxy which datasources should be exposed in this iteration. - api.set_datasource("used_mem", vs[1], min_val=0, max_val=vs[0], units="KB") - api.set_datasource("free_mem", vs[2], min_val=0, max_val=vs[0], units="KB") - # Write all required information into a file about to be read by xcp-rrdd. - api.update() From 5cf6693306792f8b861dd7ffef1a1186cbb2335c Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 10:33:31 +0000 Subject: [PATCH 093/341] CP-47869: Removed has_vendor_device_test.py ocaml/tests/ Signed-off-by: Ashwinh --- ocaml/tests/has_vendor_device_test.py | 159 -------------------------- 1 file changed, 159 deletions(-) delete mode 100644 ocaml/tests/has_vendor_device_test.py diff --git a/ocaml/tests/has_vendor_device_test.py b/ocaml/tests/has_vendor_device_test.py deleted file mode 100644 index 5d5ceaf542d..00000000000 --- a/ocaml/tests/has_vendor_device_test.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -import xmlrpclib -import sys - -s=xmlrpclib.Server("http://localhost/") -sess=s.session.login_with_password("root","xenroot")['Value'] - -pool = s.pool.get_all(sess)['Value'][0] -restrictions = s.pool.get_restrictions(sess,pool)['Value'] - -base_request = {'user_version':'1', 'is_a_template':False, 'affinity':'', 'memory_static_max':'4', 'memory_static_min':'1', 'memory_dynamic_max':'3', 'memory_dynamic_min':'2', 'VCPUs_params':{}, 'VCPUs_max':'1', 'VCPUs_at_startup':'1', 'name_label':'hello', 'name_description':'hi', 'memory_target':'2', 'actions_after_shutdown':'destroy', 'actions_after_reboot':'restart', 'actions_after_crash':'destroy', 'PV_bootloader':'', 'PV_kernel':'', 'PV_ramdisk':'', 'PV_args':'', 'PV_bootloader_args':'', 'PV_legacy_args':'', 'HVM_boot_policy':'', 'HVM_boot_params':{}, 'HVM_shadow_multiplier':1.0, 'platform':{}, 'PCI_bus':'', 'other_config':{}, 
'recommendations':'', 'xenstore_data':{}, 'ha_always_run':False, 'ha_restart_priority':'1', 'tags':[], 'blocked_operations':{}, 'protection_policy':'', 'is_snapshot_from_vmpp':False, 'appliance':'', 'start_delay':'0', 'shutdown_delay':'0', 'order':'0', 'suspend_SR':'', 'version':'0', 'generation_id':'', 'hardware_platform_version':'0'} - -# - -def create(): - res = s.VM.create(sess, base_request) - return res - -def create_with_vd(b): - request = base_request.copy() - request['has_vendor_device']=b - return s.VM.create(sess,request) - -# VD in request | OK by license | pool.policy_no_vendor_device | resulting VM.has_vendor_device -# - | False | False | False -# False | False | False | False -# True | False | False | Failure -# - | False | True | False -# False | False | True | False -# True | False | True | Failure - - -def test_with_restriction(): # OK by license column above - # Expect this to be successful on an unlicensed host, and for the field to be 'false' - print("running restricted tests (license says you're not allowed the vendor device)") - - s.pool.set_policy_no_vendor_device(sess,pool,False) - -# - | False | False | False - res = create() - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | False | False | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | False | False | Failure - res = create_with_vd(True) - print("Expecting failure: got %s" % res['Status']) - assert(res['Status']=='Failure') - - s.pool.set_policy_no_vendor_device(sess,pool,True) - -# - | False | True | False - res = create() - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting 
has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | False | True | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | False | True | Failure - res = create_with_vd(True) - print("Expecting failure: got %s" % res['Status']) - assert(res['Status']=='Failure') - - - -def test_no_restriction(): - print("running unrestricted tests") - -# - | True | False | True -# False | True | False | False -# True | True | False | True -# - | True | True | False -# False | True | True | False -# True | True | True | True - - s.pool.set_policy_no_vendor_device(sess,pool,False) - -# - | True | False | True - res = create() - vm = res['Value'] - expected = True - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | True | False | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | True | False | True - res = create_with_vd(True) - vm = res['Value'] - expected = True - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - - s.pool.set_policy_no_vendor_device(sess,pool,True) - -# - | True | True | False - res = create() - vm = res['Value'] - expected = False - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# False | True | True | False - res = create_with_vd(False) - vm = res['Value'] - expected = False - found = 
s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - -# True | True | True | True - res = create_with_vd(True) - vm = res['Value'] - expected = True - found = s.VM.get_has_vendor_device(sess,vm)['Value'] - print("Expecting has-vendor-device to be %s: got %s" % (expected,found)) - assert(expected == found) - - - -if restrictions['restrict_pci_device_for_auto_update'] == "true": - test_with_restriction() -else: - test_no_restriction() - - - - - From 1bebb13bb6728f1e11587818a1842885efea6524 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 11:04:07 +0000 Subject: [PATCH 094/341] CP-47869: Removed mtcerrno-to-ocaml.py scripts/ Signed-off-by: Ashwinh --- scripts/mtcerrno-to-ocaml.py | 63 ------------------------------------ 1 file changed, 63 deletions(-) delete mode 100755 scripts/mtcerrno-to-ocaml.py diff --git a/scripts/mtcerrno-to-ocaml.py b/scripts/mtcerrno-to-ocaml.py deleted file mode 100755 index 399d265f724..00000000000 --- a/scripts/mtcerrno-to-ocaml.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python - -# Convert the MTC exit codes into a disjoint union type. 
Each line in the file looks like: - -# errdef, MTC_EXIT_SUCCESS, 0, 0, "", - -# Usage: -# cat ../xha.hg/include/mtcerrno.def | ./scripts/mtcerrno-to-ocaml.py > ocaml/xapi/xha_errno.ml - -from __future__ import print_function -import sys - -def parse(file): - all = [] - while True: - line = file.readline() - if line == "": - return all - if line.startswith("errdef, MTC_EXIT"): - bits = line.split(",") - name = bits[1].strip() - code = bits[2].strip() - desc = bits[4].strip() - this = { "name": name, "code": code, "desc": desc } - all.append(this) - -def ctor_name(x): - ctor = x['name'] - return ctor[0].upper() + ctor[1:].lower() - -def make_datatype(all): - print("type code = ") - for x in all: - print("| %s" % ctor_name(x)) - -def to_string(all): - print("let to_string : code -> string = function") - for x in all: - print("| %s -> \"%s\"" % (ctor_name(x), x['name'])) - -def to_description_string(all): - print("let to_description_string : code -> string = function") - for x in all: - print("| %s -> %s" % (ctor_name(x), x['desc'])) - -def of_int(all): - print("let of_int : int -> code = function") - for x in all: - print("| %s -> %s" % (x['code'], ctor_name(x))) - print("| x -> failwith (Printf.sprintf \"Unrecognised MTC exit code: %d\" x)") - -if __name__ == "__main__": - all = parse(sys.stdin) - print("(* Autogenerated by %s -- do not edit *)" % (sys.argv[0])) - make_datatype(all) - to_string(all) - to_description_string(all) - of_int(all) - - - - - From 016d56fa589db52436c374ffa16faac78345b3ad Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 11:04:53 +0000 Subject: [PATCH 095/341] CP-47869: Removed hatests from scripts/ Signed-off-by: Ashwinh --- scripts/hatests | 260 ------------------------------------------------ 1 file changed, 260 deletions(-) delete mode 100755 scripts/hatests diff --git a/scripts/hatests b/scripts/hatests deleted file mode 100755 index 8828820ecb3..00000000000 --- a/scripts/hatests +++ /dev/null @@ -1,260 +0,0 @@ 
-#!/usr/bin/env python - -from __future__ import print_function -import XenAPI -import getopt -import sys -import os -import commands -import random -import time -import httplib -import urllib - -def check(svm, ip): - """ - checking that the pool is in the same condition as before - """ - global master - global masterref - global hosts - global vmrunning - flag = True - masterref2 = svm.xenapi.pool.get_all_records().values()[0]['master'] - if masterref2 != masterref : - print("From " + ip + " point of view the pool master is " + svm.xenapi.host.get_record(masterref2)["address"]) - flag = False - hosts2 = svm.xenapi.host.get_all_records() - if len(hosts) != len(hosts2) : - print("From " + ip + " point of view the number of hosts is changed.") - flag = False - for k in hosts.keys() : - if k not in hosts2 : - print("From " + ip + " point of view " + hosts[k]["address"] + " is not present any more.") - vmrecords2 = svm.xenapi.VM.get_all_records() - vmrunning2 = {} - for k, v in vmrecords2.iteritems() : - if v['power_state'] == 'Running' and int(v['domid']) == 0: - vmrunning2[k] = v - if len(vmrunning) != len(vmrunning2) : - print("From " + ip + " point of view some VMs have changed state.") - flag = False - for k, v in vmrunning.iteritems() : - if k not in vmrunning2 : - print("From " + ip + " point of view " + v['name_label'] + " is not online any more.") - if flag : - print("On %s everything is consistent." 
% ip) - -def help() : - print(""" - Usage: hatests - - where options can be: - -w, --wait wait time between stopping an host and restarting it - (default 120) - - where test can be: - master_hard_failure - master_soft_failure - slave_hard_failure - slave_soft_failure - master_vif_unplug - """) - -###### START ###### - -secs = 120 - -optlist, args = getopt.getopt(sys.argv[1:],"w:h", ["wait=", "help"]) -for o, a in optlist: - if o == "-w" or o == "--wait": - secs = int(a) - elif o == "-h" or o == "--help" : - help() - sys.exit(0) - -if len(args) != 1 : - help() - sys.exit(1) - -##read config file -#config = open(sys.args[1], "r") -#slave = [] -#for line in config : -# type, ip = line.lstrip().split() -# if type == "master" : -# master = ip -# else : -# slave.append(ip) - -#connection -s = XenAPI.Session('http://localhost') -s.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - -#Getting all the installed and running VMs with dom-id > 0 -slaves = [] -master = None -vmrecords = s.xenapi.VM.get_all_records() -for k, v in vmrecords.iteritems() : - if v['power_state'] == 'Running' and int(v['domid']) > 0: - ip = commands.getoutput("xenstore-ls /local/domain/" + v['domid'] + " | grep ip") - try: - ip = ip.split()[2] - ip = ip[1:-1] - slaves.append((k, ip)) - except: - print("VM in dom" + v['domid'] + " doesn't have an IP address") - -#finding out which one is the master -svm = XenAPI.Session("http://" + slaves[0][1]) -try : - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - masterref = svm.xenapi.pool.get_all_records().values()[0]['master'] - masterrecord = svm.xenapi.host.get_record(masterref) - masterip = masterrecord['address'] -except XenAPI.Failure as inst: - masterip = inst.details[1] - svm = XenAPI.Session("http://" + masterip) - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - masterref = svm.xenapi.pool.get_all_records().values()[0]['master'] -for i in slaves : - if masterip == i[1] : 
- master = i - slaves.remove(i) - break -print("Master ip address is " + master[1]) - -#getting ip -> hostref references -hosts = {} -hostsrecs = svm.xenapi.host.get_all_records() -for k, v in hostsrecs.iteritems() : - hosts[v['address']] = k - -#getting the VM running -vmrunning = {} -vmrecords = svm.xenapi.VM.get_all_records() -for k, v in vmrecords.iteritems() : - if v['power_state'] == 'Running' and int(v['domid']) == 0: - vmrunning[k] = v - -bringup = None -vifbringup = None -if sys.argv[-1] == "master_hard_failure" : - print("Shutting down the master") - s.xenapi.VM.hard_shutdown(master[0]) - bringup = master[0] -elif sys.argv[-1] == "master_soft_failure" : - print("Shutting down the master") - s.xenapi.VM.clean_shutdown(master[0]) - bringup = master[0] -elif sys.argv[-1] == "slave_hard_failure" : - r = random.randint(0, len(slaves) - 1) - print("Shutting down slave " + slaves[r][1]) - s.xenapi.VM.hard_shutdown(slaves[r][0]) - bringup = slaves[r][0] -elif sys.argv[-1] == "slave_hard_failure" : - r = random.randint(0, len(slaves) - 1) - print("Shutting down slave " + slaves[r][1]) - s.xenapi.VM.clean_shutdown(slaves[r][0]) - bringup = slaves[r][0] -elif sys.argv[-1] == "master_vif_unplug" : - print("Unplugging the first found attached VIF in the master") - allvifs = s.xenapi.VIF.get_all_records() - for k, v in allvifs.iteritems() : - if v['currently_attached'] and v['VM'] == master[0]: - vifbringup = k - s.xenapi.VIF.unplug(vifbringup) - break - - -print("Waiting " + str(secs) + " seconds") -count = 0 -while count < secs : - time.sleep(1) - sys.stdout.write(".") - sys.stdout.flush() - count = count + 1 -sys.stdout.write("\n") - -if bringup is not None : - print("Bringing the host up again") - s.xenapi.VM.start(bringup, False, True) -if vifbringup is not None : - print("Plugging the VIF back again") - s.xenapi.VIF.plug(vifbringup) - -print("Waiting " + str(secs) + " seconds") -count = 0 -while count < secs : - time.sleep(1) - sys.stdout.write(".") - 
sys.stdout.flush() - count = count + 1 -sys.stdout.write("\n") - -print("Collecting logs now...") -try : - fileout = open("master-" + master[1] + "-log.tar.bz2", "w") - f = urllib.urlopen("http://root:xenroot@" + master[1] + "/system-status?host_id=" + hosts[master[1]]) - buf = f.read(50) - if len(buf) == 0 : - print(master[1] + " returned an empty log.") - else : - print("Wrote master log to master-" + master[1] + "-log.tar.bz2") - while len(buf) > 0 : - fileout.write(buf) - buf = f.read(50) -except IOError: - print("Unable to connect to %s: network error." % master[1]) -try: - fileout.close() - f.close() -except: - pass - -for k, ip in slaves : - try : - fileout = open("slave-" + ip + "-log.tar.bz2", "w") - f = urllib.urlopen("http://root:xenroot@" + ip + "/system-status?host_id=" + hosts[ip]) - buf = f.read(50) - if len(buf) == 0 : - print(ip + " returned an empty log.") - else : - print("Wrote slave " + ip + " log to slave-" + ip + "-log.tar.bz2") - while len(buf) > 0 : - fileout.write(buf) - buf = f.read(50) - except IOError: - print("Unable to connect to %s: network error." % ip) - try: - fileout.close() - f.close() - except: - pass - -#checking if everything is still OK -print("Connecting to " + master[1] + "...") -svm = XenAPI.Session("http://" + master[1]) -try : - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - check(svm, master[1]) -except XenAPI.Failure as inst: - if inst.details[0] == "HOST_IS_SLAVE" : - print(master[0] + " is not master any more") -except IOError: - print("Unable to connect to %s: network error." % master[1]) - -for slave in slaves : - print("Connecting to " + slave[1] + "...") - svm = XenAPI.Session("http://" + slave[1]) - try: - svm.login_with_password('root', 'xenroot', '1.0', 'xen-api-scripts-hatest') - print("Connection succeeded! Is %s still a slave?" 
% slave[1]) - check(svm, slave[1]) - except XenAPI.Failure as inst: - if inst.details[0] == "HOST_IS_SLAVE" : - print("Connection failed because %s is still a slave." % slave[1]) - else : - print("Unable to connect to %s: XenAPI failure." % slave[1]) - except IOError: - print("Unable to connect to %s: network error." % slave[1]) From 6a38aaa2c8f86f29424a771936738944e73f2df9 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 11:05:34 +0000 Subject: [PATCH 096/341] CP-47869: Removed time-vm-boots.py from scripts/ Signed-off-by: Ashwinh --- scripts/time-vm-boots.py | 168 --------------------------------------- 1 file changed, 168 deletions(-) delete mode 100755 scripts/time-vm-boots.py diff --git a/scripts/time-vm-boots.py b/scripts/time-vm-boots.py deleted file mode 100755 index 85ec19f20f8..00000000000 --- a/scripts/time-vm-boots.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2006-2007 XenSource, Inc. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - -# Simple python example to demonstrate the event system. Logs into the server, -# registers for events on the VM_guest_metrics and computes the time taken for -# the guest agent to report an IP address. 
- -from __future__ import print_function -import XenAPI -import sys -import time - -vgm_to_vm = {} - - -def register_vm_metrics(session, vm_ref, vgm): - global vgm_to_vm - - try: - # avoid putting invalid references in the cache - tmp = session.xenapi.VM_guest_metrics.get_other(vgm) - vgm_to_vm[vgm] = vm_ref - except: - pass - - -def vm_of_metrics(ref): - global vgm_to_vm - if not(ref in vgm_to_vm.keys()): - return None - return vgm_to_vm[ref] - -interesting_vms = [] -vm_boot_times = {} -boots_seen = 0 - - -def dump_table(session): - global vm_boot_times - for vm_ref in vm_boot_times.keys(): - name = session.xenapi.VM.get_name_label(vm_ref) - print("%s %s" % (name, vm_boot_times[vm_ref])) - - -def seen_possible_boot(session, vm_ref): - global vm_boot_times - global interesting_vms - global boots_seen - if not(vm_ref in vm_boot_times.keys()) and vm_ref in interesting_vms: - t = time.strftime( "%Y%m%dT%H:%M:%SZ", time.gmtime()) - vm_boot_times[vm_ref] = t - boots_seen += 1 - - name = session.xenapi.VM.get_name_label(vm) - print("%d %s %s" % (boots_seen, name, t), file=sys.stdout) - print("%d %s %s" % (boots_seen, name, t), file=sys.stderr) - sys.stderr.flush() - - -def process_guest_metrics(session, ref, snapshot): - if "other" in snapshot.keys(): - other = snapshot["other"] - if "feature-shutdown" in other.keys(): - the_vm = vm_of_metrics(ref) - seen_possible_boot(session, the_vm) - - -def poll_metrics(session): - while True: - time.sleep(10) - all_recs = session.xenapi.VM_guest_metrics.get_all_records() - for ref in all_recs.keys(): - snapshot = all_recs[ref] - process_guest_metrics(session, ref, snapshot) - - -def process_metrics_event(session, ref): - vm_ref = vm_of_metrics(ref) - if vm_ref is None: - return - if session.xenapi.VM.get_power_state(vm_ref) != "Running": - return - other = {} - try: - other=session.xenapi.VM_guest_metrics.get_other(ref) - except Exception as e: - print(repr(e)) - - if "feature-shutdown" in other.keys(): - seen_possible_boot(session, 
vm_ref) - - -def watch_events_on_vm(session): - try: - token = '' - call_timeout = 30.0 - while True: - output = session.xenapi.event_from(["VM", "VM_guest_metrics"], token, call_timeout) - events = output['events'] - token = output['token'] - - for event in events: - if event['operation'] == 'del': - continue - if event['class'] == 'vm' and event['operation'] == 'mod': - register_vm_metrics(session, event['ref'], event['snapshot']['guest_metrics']) - continue - if event['class'] == 'vm_guest_metrics': - process_metrics_event(session, event['ref']) - continue - - except XenAPI.Failure as e: - print(e.details) - sys.exit(1) - finally: - session.xenapi.session.logout() - - -if __name__ == "__main__": - if len(sys.argv) > 4 or len(sys.argv) < 2: - print(""" -Watches all offline VMs for boots -Usage: - %s -or - %s [http://]localhost [] [] -""" % (sys.argv[0], sys.argv[0])) - sys.exit(1) - - url = sys.argv[1] - username = sys.argv[2] if len(sys.argv) > 2 else "" - password = sys.argv[3] if len(sys.argv) > 3 else "" - - if url == "http://localhost" or url == "localhost": - new_session = XenAPI.xapi_local() - else: - new_session = XenAPI.Session(url) - - # First acquire a valid session by logging in - try: - new_session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-timevmboots.py") - except XenAPI.Failure as f: - print("Failed to acquire a session: %s" % f.details) - sys.exit(1) - - # We start watching all Halted VMs - all_halted_vms = new_session.xenapi.VM.get_all_records() - for vm in all_halted_vms.keys(): - vm_rec = all_halted_vms[vm] - if vm_rec["power_state"] == "Halted" and not vm_rec["is_a_template"]: - interesting_vms.append(vm) - print("Watching %d offline VMs" % (len(interesting_vms)), file=sys.stderr) - - watch_events_on_vm(new_session) From 5a310c00f232352eaf32cd6d6199163ff8705b0e Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 11:07:12 +0000 Subject: [PATCH 097/341] CP-47869: Removed debian from scripts/templates/ 
Signed-off-by: Ashwinh --- scripts/templates/debian | 144 --------------------------------------- 1 file changed, 144 deletions(-) delete mode 100644 scripts/templates/debian diff --git a/scripts/templates/debian b/scripts/templates/debian deleted file mode 100644 index 9350a40a57d..00000000000 --- a/scripts/templates/debian +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2005-2007 XenSource, Inc - -# Code ripped out of 'xgt' script for now -from __future__ import print_function -import commands, xmlrpclib, os, sys, httplib, socket, urllib2, signal - -verbose = True - -##### begin hack. Provide xmlrpc over UNIX domain socket (cut+pasted from eliloader): -class UDSHTTPConnection(httplib.HTTPConnection): - """ Stupid hacked up HTTPConnection subclass to allow HTTP over Unix domain - sockets. """ - def connect(self): - path = self.host.replace("_", "/") - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.sock.connect(path) - -class UDSHTTP(httplib.HTTP): - _connection_class = UDSHTTPConnection - -class UDSTransport(xmlrpclib.Transport): - def make_connection(self, host): - return UDSHTTP(host) - -def xapi_local(): - return xmlrpclib.Server("http://_var_xapi_xapi/", transport=UDSTransport()) -##### end hack. - - -class CommandException(Exception): - pass - - -def run(cmd, *args): - debug("+ " + cmd % args) - (ret, out) = commands.getstatusoutput(cmd % args) - if verbose: - try: - for line in out.split("\n"): - log("| " + line) - except TypeError as e: - pass - if ret != 0: - debug ("run - command %s failed with %d" , cmd, ret) - raise CommandException(out) - return out - -def log(fmt, *args): - print(fmt % args) - -def debug(msg, *args): - if verbose: - print(msg % args) - -def create_partition(lvpath): - # 1. 
write a partition table: - pipe = os.popen('/sbin/fdisk %s' % lvpath, 'w') - - pipe.write('n\n') # new partition - pipe.write('p\n') # primary - pipe.write("1\n") # 1st partition - pipe.write('\n') # default start cylinder - pipe.write('\n') # size: as big as image - pipe.write('w\n') # write partition table - - # XXX we must ignore certain errors here as fdisk will - # sometimes return non-zero signalling error conditions - # we don't care about. Should fix to detect these cases - # specifically. - rc = pipe.close() - if rc == None: - rc = 0 - log("fdisk exited with rc %d (some non-zero exits can be ignored safely)." % rc) - -def map_partitions(lvpath): - run("/sbin/kpartx -a %s", lvpath) - ps = [] - for line in run("/sbin/kpartx -l %s" % lvpath).split("\n"): - ps.append("/dev/mapper/" + line.split()[0]) - return ps - -def unmap_partitions(lvpath): - run("/sbin/kpartx -d %s", lvpath) - -def umount(mountpoint): - run("umount -l %s",mountpoint) - -if __name__ == "__main__": - #os.setpgrp() - xvda = os.getenv("xvda") - xvdb = os.getenv("xvdb") - debug("Guest's xvda is on %s" % xvda) - debug("Guest's xvdb is on %s" % xvdb) - if xvda == None or xvdb == None: - raise "Need to pass in device names for xvda and xvdb through the environment" - - vm = os.getenv("vm") - - server = xapi_local () - try: - session_id = server.session.login_with_password('','','1.0','xen-api-scripts-debian')['Value'] - uuid = server.VM.get_uuid(session_id, vm)['Value'] - mountpoint = "/tmp/installer/%s" % (uuid) - finally: - server.session.logout(session_id) - - def sighandler(signum, frame): - umount(mountpoint) - os.killpg(0,signal.SIGKILL) - exit(1) - - signal.signal(signal.SIGTERM,sighandler) - - create_partition(xvda) - create_partition(xvdb) - - try: - xvda_parts = map_partitions(xvda) - - run("/sbin/mkfs.ext3 %s", xvda_parts[0]) - - xgt = "@SHAREDIR@/packages/xgt/%s.xgt" % os.path.basename(sys.argv[0]) - - run("/bin/mkdir -p %s", mountpoint) - try: - run("/bin/mount %s %s", xvda_parts[0], 
mountpoint) - run("/usr/bin/unzip -p %s root.tar.bz2 | tar -C %s -jx", xgt, mountpoint) - finally: - run("/bin/umount %s", mountpoint) - run("/bin/rmdir %s", mountpoint) - run("/usr/bin/unzip -p %s swap.img | dd of=%s oflag=direct bs=1M", xgt, xvdb) - - try: - session_id = server.session.login_with_password('','','1.0','xen-api-scripts-debian')['Value'] - vbds = server.VM.get_VBDs(session_id, vm)['Value'] - for i in vbds: - dev = server.VBD.get_userdevice(session_id, i)['Value'] - if dev == "0": - server.VBD.set_bootable(session_id, i, True) - finally: - server.session.logout(session_id) - finally: - unmap_partitions(xvda) From 7fa1739a023a353b9809914db12a01ce1731cd88 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 27 May 2024 11:08:24 +0000 Subject: [PATCH 098/341] CP-47869: Removed ping-master.py from scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/ping-master.py | 42 ------------------------ 1 file changed, 42 deletions(-) delete mode 100755 scripts/scalability-tests/ping-master.py diff --git a/scripts/scalability-tests/ping-master.py b/scripts/scalability-tests/ping-master.py deleted file mode 100755 index 048c5d4c938..00000000000 --- a/scripts/scalability-tests/ping-master.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python - -# Send back-to-back 'Host.get_servertime' calls to simulate the GUI's heartbeat and record latency. 
- -from __future__ import print_function -import XenAPI, sys, time - -iso8601 = "%Y%m%dT%H:%M:%SZ" - -def main(session): - global iso8601 - pool = session.xenapi.pool.get_all()[0] - host = session.xenapi.pool.get_master(pool) - while True: - start = time.time() - session.xenapi.host.get_servertime(host) - latency = time.time() - start - date = time.strftime(iso8601, time.gmtime(start)) - print("%s %.2f" % (date, latency)) - sys.stdout.flush() - time.sleep(5) - - -if __name__ == "__main__": - if len(sys.argv) != 4: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - url = sys.argv[1] - if url[:5] != "https": - raise "Must use SSL for a realistic test" - - username = sys.argv[2] - password = sys.argv[3] - - session = XenAPI.Session(url) - session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-pingmaster.py") - try: - main(session) - finally: - session.xenapi.logout() - From 264b414f34de6e66bb25bd2f82a3c7434a6c4bdf Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 28 May 2024 10:56:25 +0000 Subject: [PATCH 099/341] CP-47869: removed scripts/hatests from expected_to_fail in pyproject.toml Signed-off-by: Ashwinh --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index abcdd512aab..764ff6e60e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -250,7 +250,6 @@ discard_messages_matching = [ ] expected_to_fail = [ # Need 2to3 -w and maybe a few other minor updates: - "scripts/hatests", "scripts/backup-sr-metadata.py", "scripts/restore-sr-metadata.py", # SSLSocket.send() only accepts bytes, not unicode string as argument: From 1aedb6a5adce6e6a35897d6f9dc0f5a3839a90bb Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 08:51:08 +0000 Subject: [PATCH 100/341] Revert "CP-47869: Removed debian from scripts/templates/" This reverts commit 515a8b2e3da21e584a123960d14601ea69538a92. 
Signed-off-by: Ashwinh --- scripts/templates/debian | 144 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 scripts/templates/debian diff --git a/scripts/templates/debian b/scripts/templates/debian new file mode 100644 index 00000000000..9350a40a57d --- /dev/null +++ b/scripts/templates/debian @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# Copyright (c) 2005-2007 XenSource, Inc + +# Code ripped out of 'xgt' script for now +from __future__ import print_function +import commands, xmlrpclib, os, sys, httplib, socket, urllib2, signal + +verbose = True + +##### begin hack. Provide xmlrpc over UNIX domain socket (cut+pasted from eliloader): +class UDSHTTPConnection(httplib.HTTPConnection): + """ Stupid hacked up HTTPConnection subclass to allow HTTP over Unix domain + sockets. """ + def connect(self): + path = self.host.replace("_", "/") + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.sock.connect(path) + +class UDSHTTP(httplib.HTTP): + _connection_class = UDSHTTPConnection + +class UDSTransport(xmlrpclib.Transport): + def make_connection(self, host): + return UDSHTTP(host) + +def xapi_local(): + return xmlrpclib.Server("http://_var_xapi_xapi/", transport=UDSTransport()) +##### end hack. + + +class CommandException(Exception): + pass + + +def run(cmd, *args): + debug("+ " + cmd % args) + (ret, out) = commands.getstatusoutput(cmd % args) + if verbose: + try: + for line in out.split("\n"): + log("| " + line) + except TypeError as e: + pass + if ret != 0: + debug ("run - command %s failed with %d" , cmd, ret) + raise CommandException(out) + return out + +def log(fmt, *args): + print(fmt % args) + +def debug(msg, *args): + if verbose: + print(msg % args) + +def create_partition(lvpath): + # 1. 
write a partition table: + pipe = os.popen('/sbin/fdisk %s' % lvpath, 'w') + + pipe.write('n\n') # new partition + pipe.write('p\n') # primary + pipe.write("1\n") # 1st partition + pipe.write('\n') # default start cylinder + pipe.write('\n') # size: as big as image + pipe.write('w\n') # write partition table + + # XXX we must ignore certain errors here as fdisk will + # sometimes return non-zero signalling error conditions + # we don't care about. Should fix to detect these cases + # specifically. + rc = pipe.close() + if rc == None: + rc = 0 + log("fdisk exited with rc %d (some non-zero exits can be ignored safely)." % rc) + +def map_partitions(lvpath): + run("/sbin/kpartx -a %s", lvpath) + ps = [] + for line in run("/sbin/kpartx -l %s" % lvpath).split("\n"): + ps.append("/dev/mapper/" + line.split()[0]) + return ps + +def unmap_partitions(lvpath): + run("/sbin/kpartx -d %s", lvpath) + +def umount(mountpoint): + run("umount -l %s",mountpoint) + +if __name__ == "__main__": + #os.setpgrp() + xvda = os.getenv("xvda") + xvdb = os.getenv("xvdb") + debug("Guest's xvda is on %s" % xvda) + debug("Guest's xvdb is on %s" % xvdb) + if xvda == None or xvdb == None: + raise "Need to pass in device names for xvda and xvdb through the environment" + + vm = os.getenv("vm") + + server = xapi_local () + try: + session_id = server.session.login_with_password('','','1.0','xen-api-scripts-debian')['Value'] + uuid = server.VM.get_uuid(session_id, vm)['Value'] + mountpoint = "/tmp/installer/%s" % (uuid) + finally: + server.session.logout(session_id) + + def sighandler(signum, frame): + umount(mountpoint) + os.killpg(0,signal.SIGKILL) + exit(1) + + signal.signal(signal.SIGTERM,sighandler) + + create_partition(xvda) + create_partition(xvdb) + + try: + xvda_parts = map_partitions(xvda) + + run("/sbin/mkfs.ext3 %s", xvda_parts[0]) + + xgt = "@SHAREDIR@/packages/xgt/%s.xgt" % os.path.basename(sys.argv[0]) + + run("/bin/mkdir -p %s", mountpoint) + try: + run("/bin/mount %s %s", xvda_parts[0], 
mountpoint) + run("/usr/bin/unzip -p %s root.tar.bz2 | tar -C %s -jx", xgt, mountpoint) + finally: + run("/bin/umount %s", mountpoint) + run("/bin/rmdir %s", mountpoint) + run("/usr/bin/unzip -p %s swap.img | dd of=%s oflag=direct bs=1M", xgt, xvdb) + + try: + session_id = server.session.login_with_password('','','1.0','xen-api-scripts-debian')['Value'] + vbds = server.VM.get_VBDs(session_id, vm)['Value'] + for i in vbds: + dev = server.VBD.get_userdevice(session_id, i)['Value'] + if dev == "0": + server.VBD.set_bootable(session_id, i, True) + finally: + server.session.logout(session_id) + finally: + unmap_partitions(xvda) From a8338242853a4e3acc5fef991f0894409c56eabd Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:34:44 +0000 Subject: [PATCH 101/341] CP-47869: removed plot-result under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/plot-result | 38 --------------------------- 1 file changed, 38 deletions(-) delete mode 100755 scripts/scalability-tests/plot-result diff --git a/scripts/scalability-tests/plot-result b/scripts/scalability-tests/plot-result deleted file mode 100755 index 830590c306b..00000000000 --- a/scripts/scalability-tests/plot-result +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./plot-result vm_per_host host1 ... hostN -# - -if [ $# -le 1 ]; then - echo "Usage: $0 vm_per_host host1 [host2 ... hostN]" - echo "${0} plot the result of ./stress-tests. Need to have all the resulting .dat files of the test in the current directory. Results are .ps files." 
- exit 1 -fi - -VM_PER_HOST=$1 - -shift -HOSTS=$@ -MASTER=$1 - -for OP in "start-shutdown" "suspend-resume" "reboot" "live-migrate" "non-live-migrate"; do - STR="" - for HOST in $HOSTS; do - for i in `seq 1 ${VM_PER_HOST}`; do - if [ "${STR}" == "" ] - then - STR="'debian-etch-${HOST}-${i}.${OP}.dat' title '${HOST}-${i}' with lines" - else - STR+=", 'debian-etch-${HOST}-${i}.${OP}.dat' title '${HOST}-${i}' with lines" - fi - done - done - echo "set terminal postscript color eps" > tmp.conf - echo "set output '${OP}.ps'" >> tmp.conf - echo "plot ${STR}" >> tmp.conf - gnuplot tmp.conf -done - - From 554b439e0b06018fc58e726ddd7540f9b4b7c0d4 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:35:13 +0000 Subject: [PATCH 102/341] CP-47869: removed pool-size-tests under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/pool-size-tests | 43 ----------------------- 1 file changed, 43 deletions(-) delete mode 100755 scripts/scalability-tests/pool-size-tests diff --git a/scripts/scalability-tests/pool-size-tests b/scripts/scalability-tests/pool-size-tests deleted file mode 100755 index b3ea46eb9c7..00000000000 --- a/scripts/scalability-tests/pool-size-tests +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./test-pool-size n -# -# Host1 will become the master of the pool, with host2 ... hostN as slaves. -# Then, on each host, vm_per_host VMs are created, with names debian-etch-HOST_NAME-i (for i in 1..vm_per_host) - -if [ $# -ne 1 ]; then - echo "Usage: $0 number_of_vm" - echo "Need :" - echo " * ./repeat, ./repeat-clone, ./repeat-start and ./repeat-destroy scripts to be in the same directory that ${0};" - echo " * a pool already set up with a shared NFS storage and a HVM VM called dsl;" - echo " * ${0} must be started on the master of this pool;" - echo "${0} clones , then starts them all, then shutdown them all, then destroy them all. 
Then it ejects one host of the pool, and do the same tests again until the master remains the last host in the pool. Each operation is recoreded into a .dat file." - exit 1 -fi - -N=${1} -IFS=:',' -HOSTS=`xe host-list --minimal` -MASTER=`xe pool-list params=master --minimal` - -c=`xe host-list --minimal | sed -e 's/,/\n/g' | wc -l` - - -#main loop -for HOST in $HOSTS; -do - if [ ${HOST} != ${MASTER} ]; then - ./repeat-clone ${N} dsl > clone-${c}.dat - ./repeat-start ${N} dsl > start-${c}.dat - ./repeat ${N} shutdown dsl --force > shutdown-${c}.dat - ./repeat-destroy ${N} dsl > destroy-${c}.dat - - echo "Ejecting ${HOST}." - xe pool-eject host-uuid=${HOST} --force - #xe host-forget uuid=${HOST} - ((c--)) - echo "Ejected." - fi -done From b5a0d554548cb4be824c8f10deb94045dd198daa Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:35:49 +0000 Subject: [PATCH 103/341] CP-47869: removed provision-vm under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/provision-vm | 153 ------------------------- 1 file changed, 153 deletions(-) delete mode 100755 scripts/scalability-tests/provision-vm diff --git a/scripts/scalability-tests/provision-vm b/scripts/scalability-tests/provision-vm deleted file mode 100755 index 03fa99663e3..00000000000 --- a/scripts/scalability-tests/provision-vm +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./provision-vm vm_per_host host1 host2 ... hostN -# -# Host1 will become the master of the pool, with host2 ... hostN as slaves. -# Then, on each host, vm_per_host VMs are created, with names debian-etch-HOST_NAME-i (for i in 1..vm_per_host) - -if [ $# -le 1 ]; then - echo "Usage: ${0} vm_per_host host1 [host2 ... hostN]" - echo "${0} provisions debiant-etch VMs on each host and installs them on a local VHD disk. Moreover, all the hosts join a common pool." 
- echo "if PROVISION_VM_WITH_CD is set to 1, then attach guest tools ISO CD-ROM to the initial Debian Etch VM before cloning it." - exit 1 -fi - -VM_PER_HOST=$1 - -shift -HOSTS=$@ -MASTER=$1 - -if [ "${PROVISION_VM_WITH_CD}" == "1" ]; then - DEB="debian-etch-withCD" -else - DEB="debian-etch" -fi - -install-vhd () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - SR=`${XE} sr-list name-label='Local storage' --minimal` - if [ $SR ] - then - -# forget the local storage - echo "[${HOST}] Forgeting local storage." - PBD=`${XE} sr-list uuid=$SR params=PBDs --minimal` - ${XE} pbd-unplug uuid=${PBD} - ${XE} sr-forget uuid=${SR} - echo "[${HOST}] Forgotten." - -# build a local VHD storage - echo "[${HOST}] Creating a local VHD storage." - SR=`${XE} sr-create type=ext name-label=localvhd device-config:device=/dev/sda3` - ${XE} pool-param-set uuid=$(${XE} pool-list params=uuid --minimal) default-SR=${SR} crash-dump-SR=${SR} suspend-image-SR=${SR} - echo "[${HOST}] Created." - - fi -} - -install () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - - echo "[${HOST}] Installing the Debian Etch VM." - UUID=`${XE} vm-install new-name-label=${DEB} template="Debian Etch 4.0"` - echo "[${HOST}] Installed." - - echo "[${HOST}] Setting the IP address and the memory size of the VM." - NETWORK=`${XE} network-list bridge=xenbr0 --minimal` - VIF=`${XE} vif-create vm-uuid=${UUID} network-uuid=${NETWORK} device=0` - ${XE} vm-param-set uuid=${UUID} PV-args="noninteractive" - ${XE} vm-param-set uuid=${UUID} memory-static-max="50MiB" - ${XE} vm-param-set uuid=${UUID} memory-static-min="50MiB" - ${XE} vm-param-set uuid=${UUID} memory-dynamic-max="50MiB" - ${XE} vm-param-set uuid=${UUID} memory-dynamic-min="50MiB" - echo "[${HOST}] Set." - - if [ "${PROVISION_VM_WITH_CD}" == "1" ]; then - echo "[${HOST}] Attaching a CD-ROM." - TOOLS_ISO=`${XE} vdi-list is-tools-iso=ture params=name-label --minimal` - ${XE} vm-cd-add vm=${DEB} cd-name=${TOOLS_ISO} device=3 - echo "[${HOST}] Attached." 
- fi - -} - -#start () { -# HOST=$1 -# XE="xe -u root -pw xenroot -s ${HOST}" -# -# echo "[${HOST}] Starting VM." -# ${XE} vm-start vm="${DEB}" -# UUID=`${XE} vm-list name-label=${DEB} params=uuid --minimal` -# -# echo "[${HOST}] Waiting for the IP address of the VM to appear. This can take a minute or so." -# RC=1 -# while [ ${RC} -ne 0 ] -# do -# sleep 10 -# IP=`${XE} vm-param-get uuid=${UUID} param-name=networks param-key="0/ip"` -# RC=$? -# done -# -# echo "[${HOST}] Debian Etch VM installed (IP=${IP})." -#} - -#shutdown () { -# HOST=$1 -# XE="xe -u root -pw xenroot -s ${HOST}" -# -# echo "[${HOST}] Shutting down the VM." -# ${XE} vm-shutdown vm=${DEB} -# echo "[${HOST}] Shut down." -#} - -clone () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - - echo "# vm_number cumulative_time load_average vhd_size" > clone-${DEB}-${HOST}.dat - SR=`${XE} sr-list --minimal name-label=localvhd` - START=$(date +%s) - - for i in `seq 1 ${VM_PER_HOST}`; do - echo "[${HOST}] Cloning VM ${i}/${VM_PER_HOST}." - TMP=`${XE} vm-clone vm=${DEB} new-name-label=${DEB}-${HOST}-${i}` - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`${XE} host-data-source-query data-source=loadavg host=${HOST}` - VHDSIZE=`${XE} vdi-list --minimal sr-uuid=${SR} | sed -e 's/,/\n/g' | wc -l` - echo "${i} ${DIFF} ${LOADAVG} ${VHDSIZE}" >> clone-${DEB}-${HOST}.dat - echo "[${HOST}] Done." - done -} - -uninstall () { - HOST=$1 - XE="xe -u root -pw xenroot -s ${HOST}" - - echo "[{$HOST}] Uninstalling the Debian Etch initial VM." - ${XE} vm-uninstall force=true vm=${DEB} - echo "[${HOST}] Uninstalled." -} - -join-master () { - HOST=$1 - if [ ${HOST} != ${MASTER} ] - then - XE="xe -u root -pw xenroot -s ${HOST}" - echo "[${HOST}] Joining ${MASTER} pool." - ${XE} pool-join master-address=${MASTER} master-username=root master-password=xenroot; - echo "[${HOST}] Joined." - fi -} - -#main loop -echo "Provisioning ${VM_PER_HOST} VMs on hosts: ${HOSTS} (master is ${MASTER})." 
-for HOST in $HOSTS; -do - (install-vhd $HOST; install $HOST; clone $HOST; uninstall $HOST; join-master $HOST) & -done From 2efa58ea88d1ba0b4432f405148e4d0f481c598e Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:36:22 +0000 Subject: [PATCH 104/341] CP-47869: removed repeat under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/repeat | 33 -------------------------------- 1 file changed, 33 deletions(-) delete mode 100755 scripts/scalability-tests/repeat diff --git a/scripts/scalability-tests/repeat b/scripts/scalability-tests/repeat deleted file mode 100755 index c2990a2d171..00000000000 --- a/scripts/scalability-tests/repeat +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./repeat n operation vm_name optional_args -# - -if [ $# -le 2 ]; then - echo "usage: $0 n operation vm_name [optional arguments]" - exit 1 -fi -N=$1 -OP=$2 -VM=$3 -EXTRA=$4 - -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -echo "# vm_number cumulative_time load_average" - -perform () { - i=$1 - TMP=`xe vm-${OP} ${EXTRA} vm=${VM}${i}` - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - echo "${i} ${DIFF} ${LOADAVG}"; -} - -for i in `seq 1 ${N}`; do - perform $i -done From ed370b80a4485150ce4c748b2c60af2231a7cc21 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:37:09 +0000 Subject: [PATCH 105/341] CP-47869: removed repeat-clone under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/repeat-clone | 33 -------------------------- 1 file changed, 33 deletions(-) delete mode 100755 scripts/scalability-tests/repeat-clone diff --git a/scripts/scalability-tests/repeat-clone b/scripts/scalability-tests/repeat-clone deleted file mode 100755 index f293465b605..00000000000 --- a/scripts/scalability-tests/repeat-clone +++ /dev/null @@ -1,33 +0,0 @@ 
-#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./repeat-clone n vm_name -# - -if [ $# -ne 2 ]; then - echo "usage: $0 n vm_name" - exit 1 -fi -N=$1 -VM=$2 - -SR=`xe sr-list --minimal name-label='NFS virtual disk storage'` -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -echo "# vm_number cumulative_time load_average vhd_size" - -perform () { - i=$1 - TMP=`xe vm-clone vm=${VM} new-name-label=${VM}${i}` - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - VHDSIZE=` xe vdi-list --minimal sr-uuid=${SR} | sed -e 's/,/\n/g' | wc -l` - echo "${i} ${DIFF} ${LOADAVG} ${VHDSIZE}" -} - -for i in `seq 1 ${N}`; do - perform $i -done From 60215d1f9284ce1061a1dc228f958893f5ead413 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:37:39 +0000 Subject: [PATCH 106/341] CP-47869: removed repeat-destroy under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/repeat-destroy | 33 ------------------------ 1 file changed, 33 deletions(-) delete mode 100755 scripts/scalability-tests/repeat-destroy diff --git a/scripts/scalability-tests/repeat-destroy b/scripts/scalability-tests/repeat-destroy deleted file mode 100755 index b8031e781e4..00000000000 --- a/scripts/scalability-tests/repeat-destroy +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. 
-# -# ./repeat n operation vm_name optional_args -# - -if [ $# -ne 2 ]; then - echo "usage: $0 n vm_name" - exit 1 -fi -N=$1 -VM=$2 - -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -echo "# vm_number cumulative_time load_average" -perform () { - i=$1 - VM_UUID=`xe vm-list name-label=${VM}${i} params=uuid --minimal` - if [ "${VM_UUID}" != "" ]; then - TMP=`xe vm-destroy uuid=${VM_UUID}` - fi - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - echo "${i} ${DIFF} ${LOADAVG}"; -} - -for i in `seq 1 ${N}`; do - perform $i; -done From ec051ed903b178d5d0426730f30251dcf9df39de Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:38:20 +0000 Subject: [PATCH 107/341] CP-47869: removed repeat-start under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/repeat-start | 46 -------------------------- 1 file changed, 46 deletions(-) delete mode 100755 scripts/scalability-tests/repeat-start diff --git a/scripts/scalability-tests/repeat-start b/scripts/scalability-tests/repeat-start deleted file mode 100755 index a439b7ac8b9..00000000000 --- a/scripts/scalability-tests/repeat-start +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./repeat n operation vm_name optional_args -# - -if [ $# -ne 2 ]; then - echo "Usage: $0 n vm_name" - echo "Starts VMs nammed vm_name<1> .. vm_name and output the time taken and the load average." - echo "if WAIT_FOR_IP is set to 1, then wait the IP address to appear before starting the next VM. need xgetip executable to be in the current directory." 
- exit 1 -fi - -N=$1 -VM_NAME=$2 - -MASTER=`xe pool-list params=master --minimal` -START=$(date +%s) - -wait_IP () { - i=$1 - VM_UUID=`xe vm-list name-label=${VM_NAME}${i} params=uuid --minimal` - MAC=`xe vif-list vm-uuid=${VM_UUID} params=MAC --minimal` - echo "Waiting for the IP address of ${VM_NAME}${i} to appear." - IP=`./xgetip xenbr0 ${MAC} &> /dev/null` - echo "IP address of ${VM_NAME}${i} is ${IP}." -} - -echo "# vm_number cumulative_time load_average" - -perform () { - i=$1 - TMP=`xe vm-start vm=${VM_NAME}${i}` - if [ "${WAIT_FOR_IP}" == "1" ]; then - wait_IP ${i} - fi - CURR=$(date +%s) - DIFF=$(( ${CURR} - ${START} )) - LOADAVG=`xe host-data-source-query data-source=loadavg host=${MASTER}` - echo "${i} ${DIFF} ${LOADAVG}" -} - -for i in `seq 1 ${N}`; do - perform $i -done From de7e31e56e272a90976708a3a0d093ab1691e6fc Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:39:16 +0000 Subject: [PATCH 108/341] CP-47869: removed start-tests under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/start-tests | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100755 scripts/scalability-tests/start-tests diff --git a/scripts/scalability-tests/start-tests b/scripts/scalability-tests/start-tests deleted file mode 100755 index 06fc671f135..00000000000 --- a/scripts/scalability-tests/start-tests +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./test-pool-size n vm_name -# -# Host1 will become the master of the pool, with host2 ... hostN as slaves. 
-# Then, on each host, vm_per_host VMs are created, with names debian-etch-HOST_NAME-i (for i in 1..vm_per_host) - -if [ $# -ne 2 ]; then - echo "Usage: $0 number_of_vm initial_vm_name" - echo "Need :" - echo " * ./repeat, ./repeat-clone, ./repeat-start and ./repeat-destroy scripts to be in the same directory that ${0};" - echo " * a pool already set up with a shared NFS storage and a HVM VM called dsl;" - echo " * ${0} must be started on the master of this pool;" - echo "${0} clones , then starts them all, then shutdown them all, then destroy them all." - echo "If WAIT_FOR_IP is set to 1, the script waits for the IP address of the VM to appear before starting the next VM." - exit 1 -fi - -N=${1} -VM=${2} - -./repeat-clone ${N} ${VM} > clone-${VM}.dat -./repeat-start ${N} ${VM} > start-${VM}.dat -./repeat ${N} shutdown ${VM} --force > shutdown-${VM}.dat -./repeat-destroy ${N} ${VM} > destroy-${VM}.dat \ No newline at end of file From fd0e921f741a221341430790547624475f371fba Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:39:42 +0000 Subject: [PATCH 109/341] CP-47869: removed stress-tests under /scripts/scalability-tests/ Signed-off-by: Ashwinh --- scripts/scalability-tests/stress-tests | 121 ------------------------- 1 file changed, 121 deletions(-) delete mode 100755 scripts/scalability-tests/stress-tests diff --git a/scripts/scalability-tests/stress-tests b/scripts/scalability-tests/stress-tests deleted file mode 100755 index e193728c9e7..00000000000 --- a/scripts/scalability-tests/stress-tests +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash -# -# Copyright (c) Citrix Systems 2008. All rights reserved. -# -# ./stress-tests number_of_tests vm_per_host master slave1 slave2 ... slaveN -# - -if [ $# -le 2 ]; then - echo "Usage: $0 number_of_tests vm_per_host master [slave1 ... slaveN]" - echo "You need debian-etch--<1..vm_per_host> VMs installed in each host of the pool (use ./provision-vm to set them up)." - echo "${0} is a XenRT-like script. 
It performs: " - echo " for each VM, do sequentialy:" - echo " start/wait IP/shutdown" - echo " suspend/resume" - echo " reboot" - echo " live migrate" - echo " non-live migrate" - exit 1 -fi - -N=$1 -VM_PER_HOST=$2 - -shift -shift -HOSTS=$@ -MASTER=$1 - -XE="xe -u root -pw xenroot -s ${MASTER}" - -wait_IP () { - VM=$1 - UUID=`${XE} vm-list name-label=${VM} params=uuid --minimal` - RC=1 - while [ ${RC} -ne 0 ] - do - sleep 2 - IP=`${XE} vm-param-get uuid=${UUID} param-name=networks param-key="0/ip" &> /dev/null` - RC=$? - done -} - -start () { - VM=$1 - - ${XE} vm-start vm=${VM} - wait_IP ${VM} -} - -perform () { - OP=$1 - VM=$2 - EXTRA=$3 - - ${XE} vm-${OP} vm=${VM} $EXTRA -} - -tests () { - HOST=$1 - VM=$2 - - echo "[${VM}] start/stop tests." - START=$(date +%s) - for i in `seq 1 ${N}`; do - start ${VM}; - perform shutdown ${VM}; - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.start-shutdown.dat - done - - echo "[${VM}] suspend/resume tests." - start ${VM} - START=$(date +%s) - for i in `seq 1 ${N}`; do - perform suspend ${VM} - perform resume ${VM} - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.suspend-resume.dat - done - - echo "[${VM}] reboot tests." - START=$(date +%s) - for i in `seq 1 ${N}`; do - perform reboot ${VM} - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.reboot.dat - done - - wait_IP ${VM} - - echo "[${VM}] live migrate tests." - START=$(date +%s) - for i in `seq 1 ${N}`; do - perform migrate ${VM} "live=true host=${HOST}" - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.live-migrate.dat - done - - echo "[${VM}] non-live migrate tests." 
- START=$(date +%s) - for i in `seq 1 ${N}`; do - perform migrate ${VM} "live=false host=${HOST}" - CURR=$(date +%s); - DIFF=$(( ${CURR} - ${START} )); - echo "${i} ${DIFF}" >> ${VM}.non-live-migrate.dat - done - - perform shutdown ${VM} -} - -for HOST in ${HOSTS}; do - for i in `seq 1 ${VM_PER_HOST}`; do - VM="debian-etch-${HOST}-$i" - echo "Starting tests on ${VM}." - tests ${HOST} ${VM} & - done -done From 1afc9082a9efd7ee6a7c032db5fae4272b046fcb Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 29 May 2024 10:41:36 +0000 Subject: [PATCH 110/341] CP-47869: removed scalability-tests/event-count.py under /scripts/ Signed-off-by: Ashwinh --- scripts/scalability-tests/event-count.py | 61 ------------------------ 1 file changed, 61 deletions(-) delete mode 100644 scripts/scalability-tests/event-count.py diff --git a/scripts/scalability-tests/event-count.py b/scripts/scalability-tests/event-count.py deleted file mode 100644 index 24f3c0b5354..00000000000 --- a/scripts/scalability-tests/event-count.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python - -# Count the number of events received from the master - -from __future__ import print_function -import XenAPI, sys, time - -iso8601 = "%Y-%m-%dT%H:%M:%SZ" - - -def main(session): - global iso8601 - - token = '' - call_timeout = 30.0 - - while True: - sys.stdout.flush() - - now = time.time() - now_string = time.strftime(iso8601, time.gmtime(now)) - - try: - output = session.xenapi.event_from(["*"], token, call_timeout) - events = output['events'] - token = output['token'] - print("%s %10d 0" % (now_string, len(events))) - time.sleep(5) - - except KeyboardInterrupt: - break - - except XenAPI.Failure as e: - print(e.details) - sys.exit(1) - - -if __name__ == "__main__": - if len(sys.argv) != 4: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - - url = sys.argv[1] - if url[:5] != "https": - raise Exception("Must use SSL for a realistic test") - - username = sys.argv[2] - password = sys.argv[3] - - new_session 
= XenAPI.Session(url) - try: - new_session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-eventcount.py") - except XenAPI.Failure as f: - print("Failed to acquire a session: %s" % f.details) - sys.exit(1) - - try: - main(new_session) - finally: - new_session.xenapi.logout() From 05b6741d052b178654ed6c6d0548b05923b04b9f Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 3 Jun 2024 10:41:50 +0000 Subject: [PATCH 111/341] Revert "CP-47869: Removed rrdd-example.py ocaml/xcp-rrdd/scripts/rrdd/" Signed-off-by: Ashwinh This reverts commit a1b06ecc238fcd474eba2fb37a1cf2b83f78d0bb. --- ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100755 ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py diff --git a/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py b/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py new file mode 100755 index 00000000000..e25e0ddf016 --- /dev/null +++ b/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +import rrdd, os + +if __name__ == "__main__": + # Create a proxy for communicating with xcp-rrdd. + api = rrdd.API(plugin_id="host_mem") + while True: + # Wait until 0.5 seconds before xcp-rrdd is going to read the output file. + api.wait_until_next_reading(neg_shift=.5) + # Collect measurements. + cmd = "free -k | grep Mem | awk '{print $2, $3, $4}'" + vs = os.popen(cmd).read().strip().split() + # Tell the proxy which datasources should be exposed in this iteration. + api.set_datasource("used_mem", vs[1], min_val=0, max_val=vs[0], units="KB") + api.set_datasource("free_mem", vs[2], min_val=0, max_val=vs[0], units="KB") + # Write all required information into a file about to be read by xcp-rrdd. 
+ api.update() From 3885e39568406b0d29e05b65cc6f7a3c5dd49df5 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 5 Jun 2024 07:56:18 +0000 Subject: [PATCH 112/341] CP-47869: Removed event_listen.py under /ocaml/events/event_listen.py Signed-off-by: Ashwinh --- ocaml/events/event_listen.py | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100755 ocaml/events/event_listen.py diff --git a/ocaml/events/event_listen.py b/ocaml/events/event_listen.py deleted file mode 100755 index 79c0f8c4735..00000000000 --- a/ocaml/events/event_listen.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib, sys - -# Don't forget to include the port in the url (eg http://melton:8086/) -if len(sys.argv) <> 4: - raise "Expected arguments: " - -server = xmlrpclib.Server(sys.argv[1]); -session = server.session.login_with_password(sys.argv[2], sys.argv[3], "1.0", "xen-api-event-listen.py")['Value'] - -server.event.register(session, ["*"]) -while True: - events = server.event.next(session)['Value'] - for event in events: - print event['id'], " ", event['class'], " ", event['operation'], " ",event['ref'], " ", - if "snapshot" in event.keys(): - print "OK" - else: - print "(no snapshot)" From aac3a262b96a64dd80b1ad5073166761461f02a7 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Fri, 14 Jun 2024 13:57:15 +0000 Subject: [PATCH 113/341] CP-49934: Disabled upload coverage report for python2.7 in /.github/workflows/other.yml Signed-off-by: Ashwinh --- .github/workflows/other.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 7c00b893e4a..fc4be895fd1 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -55,6 +55,7 @@ jobs: --cov-report xml:.git/coverage${{matrix.python-version}}.xml - name: Upload Python ${{matrix.python-version}} coverage report to Codecov + if: ${{ matrix.python-version != '2.7' }} uses: codecov/codecov-action@v3 with: directory: .git From 
597e50ccf058aa0590e35aec944539a9d4f6b61c Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Mon, 17 Jun 2024 05:52:23 +0100 Subject: [PATCH 114/341] Fix pytype errors Signed-off-by: Stephen Cheng --- pyproject.toml | 1 - scripts/examples/python/XenAPI/XenAPI.py | 9 +++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f9b701e4ed6..2730c0ac018 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -255,7 +255,6 @@ expected_to_fail = [ # SSLSocket.send() only accepts bytes, not unicode string as argument: "scripts/examples/python/exportimport.py", # Other fixes needed: - "scripts/examples/python/XenAPI/XenAPI.py", "scripts/examples/python/monitor-unwanted-domains.py", "scripts/examples/python/shell.py", "scripts/static-vdis", diff --git a/scripts/examples/python/XenAPI/XenAPI.py b/scripts/examples/python/XenAPI/XenAPI.py index 0211fe5e9c8..c4c71e4445e 100644 --- a/scripts/examples/python/XenAPI/XenAPI.py +++ b/scripts/examples/python/XenAPI/XenAPI.py @@ -54,6 +54,7 @@ # OF THIS SOFTWARE. 
# -------------------------------------------------------------------- +import errno import gettext import os import socket @@ -141,8 +142,8 @@ class Session(xmlrpclib.ServerProxy): session.xenapi.session.logout() """ - def __init__(self, uri, transport=None, encoding=None, verbose=0, - allow_none=1, ignore_ssl=False): + def __init__(self, uri, transport=None, encoding=None, verbose=False, + allow_none=True, ignore_ssl=False): # Fix for CA-172901 (+ Python 2.4 compatibility) # Fix for context=ctx ( < Python 2.7.9 compatibility) @@ -198,7 +199,7 @@ def _login(self, method, params): self.last_login_params = params self.API_version = self._get_api_version() except socket.error as e: - if e.errno == socket.errno.ETIMEDOUT: + if e.errno == errno.ETIMEDOUT: raise xmlrpclib.Fault(504, 'The connection timed out') else: raise e @@ -206,7 +207,7 @@ def _login(self, method, params): def _logout(self): try: if self.last_login_method.startswith("slave_local"): - return _parse_result(self.session.local_logout(self._session)) + return _parse_result(self.session.local_logout(self._session)) # pytype: disable=attribute-error else: return _parse_result(self.session.logout(self._session)) finally: From a79ce2a2a5db0ecfb741d3a3a570b562f525a5ef Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 13 Jun 2024 15:07:47 +0000 Subject: [PATCH 115/341] CP-49896: Moved xe-scsi-dev-map to bin directory under python3 - Modified Makefile to include xe-scsi-dev-map - Removed xe-scsi-dev-map from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 1 + {scripts => python3/bin}/xe-scsi-dev-map | 0 scripts/Makefile | 1 - 3 files changed, 1 insertion(+), 1 deletion(-) rename {scripts => python3/bin}/xe-scsi-dev-map (100%) diff --git a/python3/Makefile b/python3/Makefile index 1384df9284c..d781ec27bd8 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -19,3 +19,4 @@ install: $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin + $(IPROG) 
bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/xe-scsi-dev-map b/python3/bin/xe-scsi-dev-map similarity index 100% rename from scripts/xe-scsi-dev-map rename to python3/bin/xe-scsi-dev-map diff --git a/scripts/Makefile b/scripts/Makefile index 3c3ce93b22b..dbac54dc8b5 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -90,7 +90,6 @@ install: $(IPROG) update-ca-bundle.sh $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(OPTDIR)/debug $(IPROG) debug_ha_query_liveset $(DESTDIR)$(OPTDIR)/debug - $(IPROG) xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-mount-iso-sr $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-reset-networking $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-toolstack-restart $(DESTDIR)$(OPTDIR)/bin From 56932500b2be96cd2015af56e6d7d79a2f935a25 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 17 Jun 2024 12:00:00 +0200 Subject: [PATCH 116/341] CA-390883: Add docs and improve comments on pytest and Coverage.py Signed-off-by: Bernhard Kaindl --- doc/content/python/_index.md | 129 +++++++++++++++++++++++++++++++++++ pyproject.toml | 36 ++++++---- 2 files changed, 150 insertions(+), 15 deletions(-) create mode 100644 doc/content/python/_index.md diff --git a/doc/content/python/_index.md b/doc/content/python/_index.md new file mode 100644 index 00000000000..773f02ce38c --- /dev/null +++ b/doc/content/python/_index.md @@ -0,0 +1,129 @@ +--- +title: "Python" +--- + +Introduction +------------ + +Most Python3 scripts and plugins shall be located below the `python3` directory. +The structure of the directory is as follows: + +- `python3/bin`: This contains files installed in `/opt/xensource/bin` + and are meant to be run by users +- `python3/libexec`: This contains files installed in `/opt/xensource/libexec` + and are meant to only be run by `xapi` and other daemons. 
+- `python3/packages`: Contains files to be installed in python's `site-packages` + are meant to be modules and packages to be imported by other scripts + or executed via `python3 -m` +- `python3/plugins`: This contains files that + are meant to be `xapi` plugins +- `python3/tests`: Tests for testing and covering the Python scripts and plugins + +Dependencies for development and testing +---------------------------------------- + +In GitHub CI and local testing, we can use [pre-commit] to execute the tests. +It provides a dedicated, clearly defined and always consistent Python environment. +The easiest way to run all tests and checks is to simply run [pre-commit]. +The example commands below assume that you have Python3 in your PATH. +Currently, Python 3.11 is required for it: + +```bash { title="Installing and running pre-commit" } +pip3 install pre-commit +pre-commit run -av +# Or, to just run the pytest hook: +pre-commit run -av pytest +``` + +> Note: By default, CentOS 8 provides Python 3.6, whereas some tests need Python >= 3.7 + +Alternatively, you can of course tests in any suitable environment, +given that you install the supported versions of all dependencies. +You can find the dependencies in the list [additional_dependencies] of the [pytest] hook +in the [pre-commit] configuration file [.pre-commit-config.yaml]. 
+{{% expand title= +"Example `pytest` hook from `.pre-commit-config.yaml` (expand)" %}} + +```yaml + hooks: + - id: pytest + files: python3/ + name: check that the Python3 test suite in passes + entry: sh -c 'coverage run && coverage xml && + coverage html && coverage report && + diff-cover --ignore-whitespace --compare-branch=origin/master + --show-uncovered --html-report .git/coverage-diff.html + --fail-under 50 .git/coverage3.11.xml' + require_serial: true + pass_filenames: false + language: python + types: [python] + additional_dependencies: + - coverage + - diff-cover + - future + - opentelemetry-api + - opentelemetry-exporter-zipkin-json + - opentelemetry-sdk + - pytest-mock + - mock + - wrapt + - XenAPI +``` + +{{% /expand %}} + +Coverage +-------- + +Code moved to the python3 directory tree shall have good code coverage using +tests that are executed, verified and covered using [pytest] and [Coverage.py]. +The `coverage` tool and [pytest] are configured in `pyproject.toml` and +`coverage run` is configured to run [pytest] by default. + +`coverage run` collects coverage from the run and stores it in its database. +The most simple command line to run and report coverage to stdout is: +`coverage run && coverage report` + +{{% expand title="Other commands also used in the pytest hook example above (expand)" %}} + +- `coverage xml`: Generates an XML report from the coverage database to + `.git/coverage3.11.xml`. It is needed for upload to +- `coverage html`: Generates an HTML report from the coverage database to + `.git/coverage_html/` +{{% /expand %}} + +We configure the file paths used for the generated database and other coverage +configuration in the sections `[tool.coverage.run]` and `[tool.coverage.report]` +of `pyproject.toml`. + +Pytest +------ + +If your Python environment has the [dependencies for the tests] installed, you +can run [pytest] in this environment without any arguments to use the defaults. 
+ +{{% expand title="For development, pytest can also only run one test (expand)" %}} + +To run a specific pytest command, run pytest and pass the test case to it (example): + +```bash { title="Example for running only one specific test" } +pytest python3/tests/test_perfmon.py +``` + +```bash { title="Running only one test and reporting the code coverage of it" } +coverage run -m pytest python3/tests/test_perfmon.py && coverage report +``` + +{{% /expand %}} + +[coverage.py]: https://coverage.readthedocs.io +"coverage.py is the coverage collector for Python" +[dependencies for the tests]: #dependencies-for-development-and-testing +"Installation of the dependencies for development and testing" +[pytest]: https://docs.pytest.org "Pytest documentation" +[pre-commit]: https://pre-commit.com "pre-commit commit hook framework" +[.pre-commit-config.yaml]: https://pre-commit.com/#adding-pre-commit-plugins-to-your-project +"project-specific configuration file of pre-commit, found in the project's top directory" +[additional_dependencies]: https://pre-commit.com/#pre-commit-configyaml---hooks +"dependencies that will be installed in the environment where this hook gets to run" diff --git a/pyproject.toml b/pyproject.toml index f9b701e4ed6..2cee87645a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,20 @@ line-length = 88 # ----------------------------------------------------------------------------- # Coverage.py - https://coverage.readthedocs.io/en/coverage-5.5/config.html +# +# [tool.coverage.run] and [tool.coverage.report] configure these commands: +# coverage run && coverage report +# +# These work in conjunction with [tool.pytest.ini_options] to set defaults +# for running pytest (on its own) and for running Coverage.py with pytest: +# +# Examples for Python test development with Coverage.py: +# +# Run the default tests and check coverage: +# coverage run && coverage report +# +# Run a custom set of tests and check coverage: +# coverage run -m pytest 
python3/tests/test_*.py && coverage report # ----------------------------------------------------------------------------- [tool.coverage.report] @@ -198,22 +212,12 @@ exclude = [ [tool.pytest.ini_options] # ----------------------------------------------------------------------------- -# Options to enable for pytest by default: +# addopts: Options to add to all pytest calls: # -v show what happens # -ra show short summary after running tests -# Other options should not be passed using addopts, as addopts forces those -# options to be used every time pytest is run, which is very restrictive. -# Instead, use `coverage run` to configure coverage options, and support -# running specific tests by passing them as arguments to pytest: -# For example: -# coverage run -m pytest python3/tests/test_xenapi.py -# Adding specific --cov options using addopts is not recommended as it would -# require to use the pytest-cov plugin, which would conflict with the use of -# `coverage run`. Instead, use `coverage` to configure coverage options. -# Specifying directories to test is better done using the testpaths option, -# as testpaths sets the default directories to search for tests, but does not -# force them to be run, so you can still run specific tests files by just -# passing them as arguments to pytest: pytest python3/tests/test_xenapi.py +# +# addopts are added to all pytest calls. We don't add options that would force +# testing specific paths. 
To be flexible, we use testpaths instead (see below) # ----------------------------------------------------------------------------- addopts = "-v -ra" @@ -223,7 +227,9 @@ addopts = "-v -ra" # log_cli_level: log level to show # python_files: pattern for test files # python_functions: pattern for test functions -# testpaths: directories to search for tests +# testpaths: directories to search for tests (by default, used for CI) +# For development, developers can test only specific files: +# Example: pytest python3/tests/test_perfmon.py # minversion: this config requires pytest>=7 to configure pythonpath # pythonpath: path to stub files and typing stubs for tests # xfail_strict: require to remove pytest.xfail marker when test is fixed From 1ee2b79257a52ebbfd70f193fc518c3a9089af63 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 17 Jun 2024 12:00:00 +0200 Subject: [PATCH 117/341] shell.py: Fix warning caused by overwriting cmd with str and whitespace Signed-off-by: Bernhard Kaindl --- pyproject.toml | 1 - scripts/examples/python/shell.py | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2730c0ac018..6912a211e27 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -256,7 +256,6 @@ expected_to_fail = [ "scripts/examples/python/exportimport.py", # Other fixes needed: "scripts/examples/python/monitor-unwanted-domains.py", - "scripts/examples/python/shell.py", "scripts/static-vdis", "scripts/plugins/extauth-hook-AD.py", ] diff --git a/scripts/examples/python/shell.py b/scripts/examples/python/shell.py index 6e5e4f8ff27..0fa226d798f 100755 --- a/scripts/examples/python/shell.py +++ b/scripts/examples/python/shell.py @@ -71,7 +71,7 @@ def munge_types (str): return True elif str == "False": return False - + try: return int(str) except: @@ -81,12 +81,12 @@ def munge_types (str): if len(sys.argv) < 2: print("Usage:") print(sys.argv[0], " ") - sys.exit(1) + sys.exit(1) if sys.argv[1] != "-" and
len(sys.argv) < 4: print("Usage:") print(sys.argv[0], " ") - sys.exit(1) + sys.exit(1) if sys.argv[1] != "-": url = sys.argv[1] @@ -103,10 +103,10 @@ def munge_types (str): # We want to support directly executing the cmd line, # where appropriate if len(sys.argv) > cmdAt: - cmd = sys.argv[cmdAt] + command = sys.argv[cmdAt] params = [munge_types(x) for x in sys.argv[(cmdAt + 1):]] try: - print(session.xenapi_request(cmd, tuple(params)), file=sys.stdout) + print(session.xenapi_request(command, tuple(params)), file=sys.stdout) except XenAPI.Failure as x: print(x, file=sys.stderr) sys.exit(2) From c2b2cf9dfb39e4aa12b9b3311a62065b32b14dd2 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 17 Jun 2024 12:00:00 +0200 Subject: [PATCH 118/341] shell.py: Fix warning caused by overwriting built-in str Signed-off-by: Bernhard Kaindl --- scripts/examples/python/shell.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/scripts/examples/python/shell.py b/scripts/examples/python/shell.py index 0fa226d798f..3cfdde757db 100644 --- a/scripts/examples/python/shell.py +++ b/scripts/examples/python/shell.py @@ -66,16 +66,18 @@ def do_EOF(self, line): print() sys.exit(0) -def munge_types (str): - if str == "True": + +def munge_types(var): + if var == "True": return True - elif str == "False": + if var == "False": return False try: - return int(str) + return int(var) except: - return str + return var + if __name__ == "__main__": if len(sys.argv) < 2: From 1f2dacf3766e3b8bd1deed9e8252a1f5d38901c2 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 17 Jun 2024 11:40:58 +0000 Subject: [PATCH 119/341] CP-49901: Moved disk-space from scripts/plugins to python3/plugins - Modified Makefile to include disk-space in python3/plugins directory - Removed disk-space from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 2 ++ {scripts => python3}/plugins/disk-space | 0 scripts/Makefile | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) rename {scripts 
=> python3}/plugins/disk-space (100%) diff --git a/python3/Makefile b/python3/Makefile index d781ec27bd8..f901767ec64 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -20,3 +20,5 @@ install: $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin + install -d -m 755 $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) diff --git a/scripts/plugins/disk-space b/python3/plugins/disk-space similarity index 100% rename from scripts/plugins/disk-space rename to python3/plugins/disk-space diff --git a/scripts/Makefile b/scripts/Makefile index dbac54dc8b5..cfe52c6a7b9 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -131,7 +131,6 @@ install: $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/firewall-port $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/openvswitch-config-update $(DESTDIR)$(PLUGINDIR) mkdir -p $(DESTDIR)$(HOOKSDIR)/host-post-declare-dead From cbaab651737b27614c25d960da59c9a05456c34d Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 18 Jun 2024 10:15:55 +0000 Subject: [PATCH 120/341] CP-49930: Moved wlan.py from scripts/poweron to python3/poweron - Modified Makefile in python3 directory to include wlan.py - Fixed pytest error - Removed wlan.py from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 2 ++ {scripts => python3}/poweron/wlan.py | 7 ++++--- scripts/Makefile | 2 -- 3 files changed, 6 insertions(+), 5 deletions(-) rename {scripts => python3}/poweron/wlan.py (96%) diff --git a/python3/Makefile b/python3/Makefile index f901767ec64..1965b241ff5 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -22,3 +22,5 @@ install: $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin install -d -m 755 $(DESTDIR)$(PLUGINDIR) $(IPROG) 
plugins/disk-space $(DESTDIR)$(PLUGINDIR) + $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py + $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan diff --git a/scripts/poweron/wlan.py b/python3/poweron/wlan.py similarity index 96% rename from scripts/poweron/wlan.py rename to python3/poweron/wlan.py index 948ba9a5433..1506968c2bd 100755 --- a/scripts/poweron/wlan.py +++ b/python3/poweron/wlan.py @@ -65,6 +65,10 @@ def get_physical_pif(session, pif_ref): def wake_on_lan(session, host, remote_host_uuid): + """ + Attempt to wake up a machine by sending Wake-On-Lan packets encapsulated within UDP datagrams + sent to the broadcast_addr. + """ # Find this Host's management interface: this_pif = find_host_mgmt_pif(session, inventory.get_localhost_uuid()) # Find the name of the bridge to which it is connected: @@ -79,9 +83,6 @@ def wake_on_lan(session, host, remote_host_uuid): remote_pif = get_physical_pif(session, mgmt_pif) # Find the MAC address of the management interface: mac = session.xenapi.PIF.get_MAC(remote_pif) - - """Attempt to wake up a machine by sending Wake-On-Lan packets encapsulated within UDP datagrams - sent to the broadcast_addr.""" # A Wake-On-LAN packet contains FF:FF:FF:FF:FF:FF followed by 16 repetitions of the target MAC address bin_payload = bytes.fromhex("F" * 12 + mac.replace(":", "") * 16) diff --git a/scripts/Makefile b/scripts/Makefile index cfe52c6a7b9..d7de936178f 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -169,8 +169,6 @@ endif $(IPROG) examples/python/echo.py $(DESTDIR)$(PLUGINDIR)/echo $(IPROG) examples/python/shell.py $(DESTDIR)$(LIBEXECDIR)/shell.py # poweron - $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py - $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan $(IPROG) poweron/DRAC.py $(DESTDIR)$(PLUGINDIR)/DRAC.py $(IPROG) poweron/power-on.py $(DESTDIR)$(PLUGINDIR)/power-on-host # YUM plugins From 438d753dfc7c1d183e9a07ee4e0d5459119ba9fe Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 19 
Jun 2024 09:03:38 +0000 Subject: [PATCH 121/341] CP-49902: Moved DRAC.py from scripts/poweron to python3/poweron - Modified Makefile in python3 directory to include DRAC.py - Removed DRAC.py from scripts/poweron - Removed DRAC.py from scripts/Makefile - Fixed pylint issue by using sys.exit() Signed-off-by: Ashwinh --- python3/Makefile | 1 + {scripts => python3}/poweron/DRAC.py | 2 +- scripts/Makefile | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) rename {scripts => python3}/poweron/DRAC.py (98%) diff --git a/python3/Makefile b/python3/Makefile index 1965b241ff5..44300c307e7 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -24,3 +24,4 @@ install: $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan + $(IPROG) poweron/DRAC.py $(DESTDIR)$(PLUGINDIR)/DRAC.py diff --git a/scripts/poweron/DRAC.py b/python3/poweron/DRAC.py similarity index 98% rename from scripts/poweron/DRAC.py rename to python3/poweron/DRAC.py index bace3a177a4..4493d8d6c6a 100644 --- a/scripts/poweron/DRAC.py +++ b/python3/poweron/DRAC.py @@ -49,7 +49,7 @@ def DRAC(power_on_ip, user, password): def main(): if len(sys.argv) < 3: - exit(0) + sys.exit(0) ip = sys.argv[1] user = sys.argv[2] password = sys.argv[3] diff --git a/scripts/Makefile b/scripts/Makefile index d7de936178f..5700d4bd879 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -169,7 +169,6 @@ endif $(IPROG) examples/python/echo.py $(DESTDIR)$(PLUGINDIR)/echo $(IPROG) examples/python/shell.py $(DESTDIR)$(LIBEXECDIR)/shell.py # poweron - $(IPROG) poweron/DRAC.py $(DESTDIR)$(PLUGINDIR)/DRAC.py $(IPROG) poweron/power-on.py $(DESTDIR)$(PLUGINDIR)/power-on-host # YUM plugins $(IPROG) yum-plugins/accesstoken.py $(DESTDIR)$(YUMPLUGINDIR) From ebf54eb1e12ea0dd1de3b8a77e15e3a95668b59c Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 19 Jun 2024 11:35:24 +0000 Subject: [PATCH 122/341] CP-49975: Replaced mkdir -p with
\\\$(IPROG) -d for directory creation in install target Signed-off-by: Ashwinh --- python3/Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python3/Makefile b/python3/Makefile index 44300c307e7..e86d5c683c3 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -6,9 +6,10 @@ IDATA=install -m 644 SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") install: - mkdir -p $(DESTDIR)$(OPTDIR)/bin - mkdir -p $(DESTDIR)$(SITE3_DIR) - mkdir -p $(DESTDIR)$(LIBEXECDIR) + $(IPROG) -d $(DESTDIR)$(OPTDIR)/bin + $(IPROG) -d $(DESTDIR)$(SITE3_DIR) + $(IPROG) -d $(DESTDIR)$(LIBEXECDIR) + $(IPROG) -d $(DESTDIR)$(PLUGINDIR) $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ @@ -20,7 +21,6 @@ install: $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin - install -d -m 755 $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan From 6b70aad0877cfe4606bc9ed4501bd2ad59fa70c0 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 19 Jun 2024 12:44:41 +0000 Subject: [PATCH 123/341] CP-49904: Removed exportimport.py from scripts/examples/python/ - Removed exportimport.py from expected_to_fail in pyproject.toml Signed-off-by: Ashwinh --- pyproject.toml | 2 - scripts/examples/python/exportimport.py | 142 ------------------------ 2 files changed, 144 deletions(-) delete mode 100755 scripts/examples/python/exportimport.py diff --git a/pyproject.toml b/pyproject.toml index cb2be7ffbb5..b41deb50bf4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -258,8 +258,6 @@ expected_to_fail = [ # Need 2to3 -w and maybe a few other minor updates: "scripts/backup-sr-metadata.py", "scripts/restore-sr-metadata.py", - # SSLSocket.send() only accepts bytes, not unicode string as argument: - 
"scripts/examples/python/exportimport.py", # Other fixes needed: "scripts/examples/python/monitor-unwanted-domains.py", "scripts/examples/python/shell.py", diff --git a/scripts/examples/python/exportimport.py b/scripts/examples/python/exportimport.py deleted file mode 100755 index bc72580659b..00000000000 --- a/scripts/examples/python/exportimport.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2014 Citrix, Inc. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Demonstrate how to -# - export raw disk images -# - import raw disk images -# - connect an export to an import to copy a raw disk image - -from __future__ import print_function -import sys, os, socket, urllib.request, urllib.error, urllib.parse, XenAPI, traceback, ssl, time - -def exportimport(url, xapi, session, src_vdi, dst_vdi): - # If an HTTP operation fails then it will record the error on the task - # object. Note you can't use the HTTP response code for this because - # it must be sent *before* the stream is processed. 
- import_task = xapi.xenapi.task.create("import " + dst_vdi, "") - export_task = xapi.xenapi.task.create("export " + src_vdi, "") - try: - # an HTTP GET of this will export a disk: - get_url = "/export_raw_vdi?session_id=%s&vdi=%s&task_id=%s" % (session, src_vdi, export_task) - # an HTTP PUT to this will import a disk: - put_url = "/import_raw_vdi?session_id=%s&vdi=%s&task_id=%s" % (session, dst_vdi, import_task) - - # 'data' is the stream of raw data: - data = urllib.request.urlopen(url + get_url) - - # python's builtin library doesn't support HTTP PUT very well - # so we do it manually. Note xapi doesn't support Transfer-encoding: - # chunked so we must send the data raw. - url = urllib.parse.urlparse(url) - host = url.netloc.split(":")[0] # assume port 443 - if url.scheme != "https": - print("Sorry, this example only supports HTTPS (not HTTP)", file=sys.stderr) - print("Plaintext HTTP has the following problems:", file=sys.stderr) - print(" - the data can be captured by other programs on the network", file=sys.stderr) - print(" - some network middleboxes will mangle the data", file=sys.stderr) - # time wasted debugging a problem caused by a middlebox: 3hrs - # Just use HTTPS! 
- return - - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - output = ssl.wrap_socket(s) - output.connect((host, 443)) - - # HTTP/1.0 with no transfer-encoding - headers = [ - "PUT %s HTTP/1.0" % put_url, - "Connection:close", - "" - ] - print("Sending HTTP request:") - for h in headers: - output.send("%s\r\n" % h) - print("%s\r\n" % h) - result = output.recv(1024) - print("Received HTTP response:") - print(result) - if "200 OK" not in result: - print("Expected an HTTP 200, got %s" % result, file=sys.stderr) - return - - # Copy the raw bytes, signal completion by closing the socket - virtual_size = long(xapi.xenapi.VDI.get_virtual_size(src_vdi)) - print("Copying %Ld bytes" % virtual_size) - left = virtual_size - while left > 0: - block = data.read(min(65536, left)) - if block is None: - break - output.send(block) - left = left - len(block) - output.close() - - # Wait for the tasks to complete and check whether they both - # succeeded. It takes a few seconds to detach the disk etc. 
- finished = False - while not finished: - import_status = xapi.xenapi.task.get_status(import_task) - export_status = xapi.xenapi.task.get_status(export_task) - finished = import_status != "pending" and export_task != "pending" - time.sleep(1) - if import_status == "success" and export_status == "success": - print("OK") - else: - print("FAILED") - if import_status != "success": - print("The import task failed with: ", " ".join(xapi.xenapi.task.get_error_info(import_task))) - if export_status != "success": - print("The export task failed with: ", " ".join(xapi.xenapi.task.get_error_info(export_task))) - - finally: - # The task creator has to destroy them at the end: - xapi.xenapi.task.destroy(import_task) - xapi.xenapi.task.destroy(export_task) - -if __name__ == "__main__": - if len(sys.argv) != 5: - print("Usage:") - print(sys.argv[0], " ") - print(" -- creates a fresh VDI and streams the contents of into it.") - print() - print("Example:") - print("SR=$(xe pool-list params=default-SR --minimal)") - print("VDI=$(xe vdi-create sr-uuid=$SR name-label=test virtual-size=128MiB type=user)") - print(sys.argv[0], "https://localhost password $VDI") - sys.exit(1) - url = sys.argv[1] - username = sys.argv[2] - password = sys.argv[3] - vdi_uuid = sys.argv[4] - # First acquire a valid session by logging in: - xapi = XenAPI.Session(url) - xapi.xenapi.login_with_password(username, password, '1.0', 'xen-api-scripts-exportimport.py') - dst_vdi = None - try: - src_vdi = xapi.xenapi.VDI.get_by_uuid(vdi_uuid) - sr = xapi.xenapi.VDI.get_SR(src_vdi) - # Create an empty VDI with the same initial parameters (e.g. 
size) - # to upload into - vdi_args = xapi.xenapi.VDI.get_record(src_vdi) - dst_vdi = xapi.xenapi.VDI.create(vdi_args) - exportimport(url, xapi, xapi._session, src_vdi, dst_vdi) - except Exception as e: - print("Caught %s: trying to clean up" % str(e)) - traceback.print_exc() - if dst_vdi: - xapi.xenapi.VDI.destroy(dst_vdi) - finally: - xapi.xenapi.logout() From 9319f5edfd057e57bc0206fe17c56ca12d275628 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 19 Jun 2024 13:52:58 +0000 Subject: [PATCH 124/341] CP-49914: Removed monitor-unwanted-domains.py from scripts/examples/python/ - Removed monitor-unwanted-domains.py from expected_to_fail in pyproject.toml Signed-off-by: Ashwinh --- pyproject.toml | 1 - .../python/monitor-unwanted-domains.py | 89 ------------------- 2 files changed, 90 deletions(-) delete mode 100644 scripts/examples/python/monitor-unwanted-domains.py diff --git a/pyproject.toml b/pyproject.toml index cb2be7ffbb5..7fa401d14a3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -261,7 +261,6 @@ expected_to_fail = [ # SSLSocket.send() only accepts bytes, not unicode string as argument: "scripts/examples/python/exportimport.py", # Other fixes needed: - "scripts/examples/python/monitor-unwanted-domains.py", "scripts/examples/python/shell.py", "scripts/static-vdis", "scripts/plugins/extauth-hook-AD.py", diff --git a/scripts/examples/python/monitor-unwanted-domains.py b/scripts/examples/python/monitor-unwanted-domains.py deleted file mode 100644 index 317725288e2..00000000000 --- a/scripts/examples/python/monitor-unwanted-domains.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import print_function -import os, subprocess, XenAPI, inventory, time, sys - -# Script which monitors the domains running on a host, looks for -# paused domains which don't correspond to VMs which are running here -# or are about to run here, logs them and optionally destroys them. 
- -# Return a list of (domid, uuid) tuples, one per paused domain on this host -def list_paused_domains(): - results = [] - all = subprocess.Popen(["@OPTDIR@/bin/list_domains"], stdout=subprocess.PIPE).communicate()[0] - lines = all.split("\n") - for domain in lines[1:]: - bits = domain.split() - if bits != []: - domid = bits[0] - uuid = bits[2] - state = bits[4] - if 'P' in state: - results.append( (domid, uuid) ) - return results - -# Given localhost's uuid and a (domid, uuid) tuple, return True if the domain -# be somewhere else i.e. we think it may have leaked here -def should_domain_be_somewhere_else(localhost_uuid, domain): - (domid, uuid) = domain - try: - x = XenAPI.xapi_local() - x.xenapi.login_with_password("root", "", "1.0", "xen-api-scripts-monitor-unwanted-domains.py") - try: - try: - vm = x.xenapi.VM.get_by_uuid(uuid) - resident_on = x.xenapi.VM.get_resident_on(vm) - current_operations = x.xenapi.VM.get_current_operations(vm) - result = current_operations == {} and resident_on != localhost_uuid - if result: - log("domid %s uuid %s: is not being operated on and is not resident here" % (domid, uuid)) - return result - except XenAPI.Failure as e: - if e.details[0] == "UUID_INVALID": - # VM is totally bogus - log("domid %s uuid %s: is not in the xapi database" % (domid, uuid)) - return True - # fail safe for now - return False - finally: - x.xenapi.logout() - except: - return False - -def log(str): - print(str) - -# Destroy the given domain -def destroy_domain(domain): - (domid, uuid) = domain - log("destroying domid %s uuid %s" % (domid, uuid)) - all = subprocess.Popen(["@OPTDIR@/debug/destroy_domain", "-domid", domid], stdout=subprocess.PIPE).communicate()[0] - -# Keep track of when a domain first looked like it should be here -domain_first_noticed = {} - -# Number of seconds after which we conclude that a domain really shouldn't be here -threshold = 60 - -if __name__ == "__main__": - localhost_uuid = inventory.get_localhost_uuid () - while True: - 
time.sleep(1) - paused = list_paused_domains () - # GC the domain_first_noticed map - for d in domain_first_noticed.keys(): - if d not in paused: - log("domid %s uuid %s: looks ok now, forgetting about it" % d) - del domain_first_noticed[d] - - for d in list_paused_domains(): - if should_domain_be_somewhere_else(localhost_uuid, d): - if d not in domain_first_noticed: - domain_first_noticed[d] = time.time() - noticed_for = time.time() - domain_first_noticed[d] - if noticed_for > threshold: - log("domid %s uuid %s: has been in bad state for over threshold" % d) - if "-destroy" in sys.argv: - destroy_domain(d) - - From d9e81653e79c6c9857004a705de7ca1b8ae73ca7 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 19 Jun 2024 14:56:12 +0000 Subject: [PATCH 125/341] CP-49913: Removed mini-xenrt.py from scripts/examples/python/ Signed-off-by: Ashwinh --- scripts/examples/python/mini-xenrt.py | 141 -------------------------- 1 file changed, 141 deletions(-) delete mode 100644 scripts/examples/python/mini-xenrt.py diff --git a/scripts/examples/python/mini-xenrt.py b/scripts/examples/python/mini-xenrt.py deleted file mode 100644 index b30e9d9973c..00000000000 --- a/scripts/examples/python/mini-xenrt.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/env python3 - -# Receive multiple VMs -# Issue parallel loops of: reboot, suspend/resume, migrate - -from __future__ import print_function -import xmlrpc.client -from threading import Thread -import time, sys - -iso8601 = "%Y%m%dT%H:%M:%SZ" - -stop_on_first_failure = True -stop = False - -class Operation: - def __init__(self): - raise NotImplementedError - def execute(self, server, session_id): - raise NotImplementedError - -class Reboot(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, server, session_id): - return server.VM.clean_reboot(session_id, self.vm) - def __str__(self): - return "clean_reboot(%s)" % self.vm - -class SuspendResume(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, 
server, session_id): - x = { "ErrorDescription": [ "VM_MISSING_PV_DRIVERS" ] } - while "ErrorDescription" in x and x["ErrorDescription"][0] == "VM_MISSING_PV_DRIVERS": - x = server.VM.suspend(session_id, self.vm) - if "ErrorDescription" in x: - time.sleep(1) - if x["Status"] != "Success": - return x - return server.VM.resume(session_id, self.vm, False, False) - def __str__(self): - return "suspendresume(%s)" % self.vm - -class ShutdownStart(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, server, session_id): - x = server.VM.clean_shutdown(session_id, self.vm) - if x["Status"] != "Success": - return x - return server.VM.start(session_id, self.vm, False, False) - #return { "Status": "bad", "ErrorDescription": "foo" } - def __str__(self): - return "shutdownstart(%s)" % self.vm - -class LocalhostMigrate(Operation): - def __init__(self, vm): - self.vm = vm - def execute(self, server, session_id): - return server.VM.pool_migrate(session_id, self.vm, server.VM.get_resident_on(session_id, self.vm)["Value"], { "live": "true" } ) - def __str__(self): - return "localhostmigrate(%s)" % self.vm - -# Use this to give each thread a different ID -worker_count = 0 - -class Worker(Thread): - def __init__(self, server, session_id, operations): - Thread.__init__(self) - self.server = server - self.session_id = session_id - self.operations = operations - self.num_successes = 0 - self.num_failures = 0 - global worker_count - self.id = worker_count - worker_count = worker_count + 1 - def run(self): - global iso8601 - global stop_on_first_failure, stop - for op in self.operations: - description = str(op) - - if stop: - return - - start = time.strftime(iso8601, time.gmtime(time.time ())) - result = op.execute(self.server, self.session_id) - end = time.strftime(iso8601, time.gmtime(time.time ())) - - if result["Status"] == "Success": - print("SUCCESS %d %s %s %s" % (self.id, start, end, description)) - self.num_successes = self.num_successes + 1 - else: - 
error_descr = result["ErrorDescription"] - print("FAILURE %d %s %s %s %s" % (self.id, start, end, error_descr[0], description)) - self.num_failures = self.num_failures + 1 - if stop_on_first_failure: - stop = True - -def make_operation_list(vm): - return [ Reboot(vm), SuspendResume(vm), LocalhostMigrate(vm) ] * 100 - -if __name__ == "__main__": - if len(sys.argv) != 3: - print("Usage:") - print(" %s " % (sys.argv[0])) - print(" -- performs parallel operations on VMs with the specified other-config key") - sys.exit(1) - - x = xmlrpc.client.ServerProxy(sys.argv[1]) - key = sys.argv[2] - session = x.session.login_with_password("root", "xenroot", "1.0", "xen-api-scripts-minixenrt.py")["Value"] - vms = x.VM.get_all_records(session)["Value"] - - workers = [] - for vm in vms.keys(): - if key in vms[vm]["other_config"]: - allowed_ops = vms[vm]["allowed_operations"] - for op in [ "clean_reboot", "suspend", "pool_migrate" ]: - if op not in allowed_ops: - raise RuntimeError("VM %s is not in a state where it can %s" % (vms[vm]["name_label"], op)) - workers.append(Worker(x, session, make_operation_list(vm))) - for w in workers: - w.start() - for w in workers: - w.join() - successes = 0 - failures = 0 - for w in workers: - successes = successes + w.num_successes - failures = failures + w.num_failures - print("Total successes = %d" % successes) - print("Total failures = %d" % failures) - if failures == 0: - print("PASS") - sys.exit(0) - else: - print("FAIL") - sys.exit(1) From 798fa8979a991e277db1030584eb6c1339dacf0f Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 19 Jun 2024 15:06:48 +0000 Subject: [PATCH 126/341] CP-49923: Removed provision.py from scripts/examples/python/ Signed-off-by: Ashwinh --- scripts/examples/python/provision.py | 111 --------------------------- 1 file changed, 111 deletions(-) delete mode 100644 scripts/examples/python/provision.py diff --git a/scripts/examples/python/provision.py b/scripts/examples/python/provision.py deleted file mode 100644 index 
3b8a224ffae..00000000000 --- a/scripts/examples/python/provision.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2007 XenSource, Inc. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Parse/regenerate the "disk provisioning" XML contained within templates -# NB this provisioning XML refers to disks which should be created when -# a VM is installed from this template. It does not apply to templates -# which have been created from real VMs -- they have their own disks. - -from __future__ import print_function -import XenAPI -import xml.dom.minidom - -class Disk: - """Represents a disk which should be created for this VM""" - def __init__(self, device, size, sr, bootable): - self.device = device # 0, 1, 2, ... 
- self.size = size # in bytes - self.sr = sr # uuid of SR - self.bootable = bootable - def toElement(self, doc): - disk = doc.createElement("disk") - disk.setAttribute("device", self.device) - disk.setAttribute("size", self.size) - disk.setAttribute("sr", self.sr) - b = "false" - if self.bootable: b = "true" - disk.setAttribute("bootable", b) - return disk - -def parseDisk(element): - device = element.getAttribute("device") - size = element.getAttribute("size") - sr = element.getAttribute("sr") - b = element.getAttribute("bootable") == "true" - return Disk(device, size, sr, b) - -class ProvisionSpec: - """Represents a provisioning specification: currently a list of required disks""" - def __init__(self): - self.disks = [] - def toElement(self, doc): - element = doc.createElement("provision") - for disk in self.disks: - element.appendChild(disk.toElement(doc)) - return element - def setSR(self, sr): - """Set the requested SR for each disk""" - for disk in self.disks: - disk.sr = sr - -def parseProvisionSpec(txt): - """Return an instance of type ProvisionSpec given XML text""" - doc = xml.dom.minidom.parseString(txt) # pytype: disable=pyi-error - all = doc.getElementsByTagName("provision") - if len(all) != 1: - raise ValueError("Expected to find exactly one element") - ps = ProvisionSpec() - disks = all[0].getElementsByTagName("disk") - for disk in disks: - ps.disks.append(parseDisk(disk)) - return ps - -def printProvisionSpec(ps): - """Return a string containing pretty-printed XML corresponding to the supplied provisioning spec""" - doc = xml.dom.minidom.Document() # pytype: disable=pyi-error - doc.appendChild(ps.toElement(doc)) - return doc.toprettyxml() - -def getProvisionSpec(session, vm): - """Read the provision spec of a template/VM""" - other_config = session.xenapi.VM.get_other_config(vm) - return parseProvisionSpec(other_config['disks']) - -def setProvisionSpec(session, vm, ps): - """Set the provision spec of a template/VM""" - txt = printProvisionSpec(ps) - 
try: - session.xenapi.VM.remove_from_other_config(vm, "disks") - except: - pass - session.xenapi.VM.add_to_other_config(vm, "disks", txt) - -if __name__ == "__main__": - print("Unit test of provision XML spec module") - print("--------------------------------------") - ps = ProvisionSpec() - ps.disks.append(Disk("0", "1024", "0000-0000", True)) - ps.disks.append(Disk("1", "2048", "1111-1111", False)) - print("* Pretty-printing spec") - txt = printProvisionSpec(ps) - print(txt) - print("* Re-parsing output") - ps2 = parseProvisionSpec(txt) - print("* Pretty-printing spec") - txt2 = printProvisionSpec(ps) - print(txt2) - if txt != txt2: - raise AssertionError("Sanity-check failed: print(parse(print(x))) != print(x)") - print("* OK: print(parse(print(x))) == print(x)") From 920194697d64fc0ffb51354b98083c708f39b82f Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 19 Jun 2024 15:13:20 +0000 Subject: [PATCH 127/341] CP-49925: Removed renameif.py from scripts/examples/python/ Signed-off-by: Ashwinh --- scripts/examples/python/renameif.py | 167 ---------------------------- 1 file changed, 167 deletions(-) delete mode 100755 scripts/examples/python/renameif.py diff --git a/scripts/examples/python/renameif.py b/scripts/examples/python/renameif.py deleted file mode 100755 index 4a3d796e1da..00000000000 --- a/scripts/examples/python/renameif.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2008 XenSource, Inc. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Allow the user to change the MAC address -> interface mapping - -from __future__ import print_function -import XenAPI, inventory, sys - -def warn(txt): - print(txt, file=sys.stderr) - -def show_pifs(pifs): - print("NIC MAC Notes") - print("----------------------------------------------") - for ref in pifs.keys(): - notes = [] - if pifs[ref]['management']: - notes.append("management interface") - nic = pifs[ref]['device'][3:] - try: - metrics = session.xenapi.PIF_metrics.get_record(session.xenapi.PIF.get_metrics(ref)) - if metrics['carrier']: - notes.append("carrier detected") - else: - notes.append("no carrier detected") - except: - pass - - print("%3s %s %s" % (nic, pifs[ref]['MAC'], ", ".join(notes))) - -def select(pifs, key): - """Select a PIF by device name or MAC""" - for ref in pifs.keys(): - if pifs[ref]['device'][3:] == key: - return ref - if pifs[ref]['MAC'].upper() == key.upper(): - return ref - return None - -def save(session, host, pifs): - """Commit changes""" - # Check that device names are unique - devices = [] - for ref in pifs.keys(): - devices.append(pifs[ref]['device'][3:]) - for i in set(devices): - devices.remove(i) - if devices != []: - print("ERROR: cannot assign two interfaces the same NIC number (%s)" % (", ".join(i))) - print("Aborted.") - sys.exit(1) - vifs = [] - for ref in pifs.keys(): - net = pifs[ref]['network'] - for vif in session.xenapi.network.get_VIFs(net): - if session.xenapi.VIF.get_currently_attached(vif): - vifs.append(vif) - if len(vifs) > 0: - plural = "" - if len(vifs) > 1: - plural = "s" - print("WARNING: this operation requires unplugging %d guest network interface%s" % (len(vifs), plural)) 
- print("Are you sure you want to continue? (yes/no) > ", end=' ') - if sys.stdin.readline().strip().lower() != "yes": - print("Aborted.") - sys.exit(1) - for vif in vifs: - dev = session.xenapi.VIF.get_device(vif) - vm = session.xenapi.VIF.get_VM(vif) - uuid = session.xenapi.VM.get_uuid(vm) - print("Hot-unplugging interface %s on VM %s" % (dev, uuid)) - session.xenapi.VIF.unplug(vif) - - for ref in pifs.keys(): - mac = pifs[ref]['MAC'] - if pifs[ref]['management']: - print("Disabling management NIC (%s)" % mac) - session.xenapi.host.management_disable() - session.xenapi.PIF.forget(ref) - for ref in pifs.keys(): - mac = pifs[ref]['MAC'] - device = pifs[ref]['device'] - mode = pifs[ref]['ip_configuration_mode'] - IP = pifs[ref]['IP'] - netmask = pifs[ref]['IP'] - gateway = pifs[ref]['gateway'] - DNS = pifs[ref]['DNS'] - new_ref = session.xenapi.PIF.introduce(host, mac, device) - session.xenapi.PIF.reconfigure_ip(new_ref, mode, IP, netmask, gateway, DNS) - if pifs[ref]['management']: - print("Re-enabling management NIC (%s)" % mac) - session.xenapi.host.management_reconfigure(new_ref) - - for vif in vifs: - dev = session.xenapi.VIF.get_device(vif) - vm = session.xenapi.VIF.get_VM(vif) - uuid = session.xenapi.VM.get_uuid(vm) - print("Hot-plugging interface %s on VM %s" % (dev, uuid)) - session.xenapi.VIF.plug(vif) - -def renameif(session): - uuid = inventory.get_localhost_uuid () - host = session.xenapi.host.get_by_uuid(uuid) - pool = session.xenapi.pool.get_all()[0] - master = session.xenapi.pool.get_master(pool) - if host != master: - warn("This host is a slave; it is not possible to rename the management interface") - - pifs = session.xenapi.PIF.get_all_records() - for ref in pifs.keys(): - if pifs[ref]['host'] != host or pifs[ref]['physical'] != True: - del pifs[ref] - - while True: - print("Current mappings:") - show_pifs(pifs) - print() - print("Type 'quit' to quit; 'save' to save; or a NIC number or MAC address to edit") - print("> ", end=' ') - x = 
sys.stdin.readline().strip() - if x.lower() == 'quit': - sys.exit(0) - if x.lower() == 'save': - # If a slave, filter out the management PIF - if host != master: - for ref in pifs.keys(): - if pifs[ref]['management']: - del pifs[ref] - save(session, host, pifs) - sys.exit(0) - pif = select(pifs, x) - if pif != None: - # Make sure this is not a slave's management PIF - if host != master and pifs[pif]['management']: - print("ERROR: cannot modify the management interface of a slave.") - else: - print("Selected NIC with MAC '%s'. Enter new NIC number:" % pifs[pif]['MAC']) - print("> ", end=' ') - nic = sys.stdin.readline().strip() - if not(nic.isdigit()): - print("ERROR: must enter a number (e.g. 0, 1, 2, 3, ...)") - else: - pifs[pif]['device'] = "eth" + nic - else: - print("NIC '%s' not found" % (x)) - print() - - -if __name__ == "__main__": - session = XenAPI.xapi_local() - session.login_with_password("", "", "1.0", "xen-api-scripts-renameifs.py") - try: - renameif(session) - finally: - session.logout() From d305227c7d14d1125c281ec202370b80073b2d40 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 20 Jun 2024 07:19:57 +0000 Subject: [PATCH 128/341] CP-49927: Removed smapiv2.py from scripts/examples Signed-off-by: Ashwinh --- scripts/examples/smapiv2.py | 293 ------------------------------------ 1 file changed, 293 deletions(-) delete mode 100644 scripts/examples/smapiv2.py diff --git a/scripts/examples/smapiv2.py b/scripts/examples/smapiv2.py deleted file mode 100644 index 1047f57825c..00000000000 --- a/scripts/examples/smapiv2.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import print_function -import os, sys, time, socket, traceback - -log_f = open(os.dup(sys.stdout.fileno()), "w") -pid = None - -def reopenlog(log_file): - global log_f - if log_f: - log_f.close() - if log_file: - try: - log_f = open(log_file, "a") - except FileNotFoundError: - log_f = open(log_file, "w") - else: - log_f = open(os.dup(sys.stdout.fileno()), "a") - -def 
log(txt): - global log_f, pid - if not pid: - pid = os.getpid() - t = time.strftime("%Y%m%dT%H:%M:%SZ", time.gmtime()) - print("%s [%d] %s" % (t, pid, txt), file=log_f) - log_f.flush() - -# Functions to construct SMAPI return types ################################# - -unit = [ "Success", "Unit" ] - -# Throw this to return an SR_BACKEND_FAILURE to the caller ################## - -class BackendError(Exception): - def __init__(self, code, params): - self.code = code - self.params = params - def __str__(self): - return "BackendError(%s, %s)" % (self.code, ", ".join(self.params)) - -class Vdi_does_not_exist(Exception): - def __init__(self, vdi): - self.vdi = vdi - def __str__(self): - return "Vdi_does_not_exist(%s)" % self.vdi - -def vdi(vdi_info): -# return ['Success', ['Vdi', {'vdi': location, 'virtual_size': str(virtual_size) }]] - return ['Success', ['Vdi', vdi_info]] - -def vdis(vis): - return ['Success', ['Vdis', vis]] - -def params(params): - return ['Success', ['Params', params ]] - -def value(result): - return { "Status": "Success", "Value": result } - -def backend_error(code, params): - return [ "Failure", [ "Backend_error", code, params ] ] - -def internal_error(txt): - return [ "Failure", "Internal_error", txt ] - -def vdi_does_not_exist(): - return [ "Failure", "Vdi_does_not_exist" ] - -# Type-checking helper functions ############################################ - -vdi_info_types = { - "vdi": type(""), - "name_label": type(""), - "name_description": type(""), - "ty": type(""), - "metadata_of_pool": type(""), - "is_a_snapshot": type(True), - "snapshot_time": type(""), - "snapshot_of": type(""), - "read_only": type(True), - "cbt_enabled": type(True), - "virtual_size": type(""), - "physical_utilisation": type("") -} - -def make_vdi_info(v): - global vdi_info_types - for k in vdi_info_types: - t = vdi_info_types[k] - if t == type(""): - v[k] = str(v[k]) - elif t == type(True): - v[k] = str(v[k]).lower() == "true" - else: - raise BackendError("make_vdi_info 
unknown type", [ str(t) ]) - return v - -def vdi_info(v): - global vdi_info_types - for k in vdi_info_types: - if k not in v: - raise BackendError("vdi_info missing key", [ k, repr(v) ]) - t = vdi_info_types[k] - if type(v[k]) != t: - raise BackendError("vdi_info key has wrong type", [ k, str(t), str(type(v[k])) ]) - return v - -def expect_none(x): - if x != None: - raise BackendError("type error", [ "None", repr(x) ]) - -def expect_long(x): - if type(x) != type(0): - raise BackendError("type error", [ "long int", repr(x) ]) - -def expect_string(x): - if type(x) != type(""): - raise BackendError("type error", [ "string", repr(x) ]) - -# Well-known feature flags understood by xapi ############################## - -feature_sr_probe = "SR_PROBE" -feature_sr_update = "SR_UPDATE" -feature_sr_supports_local_caching = "SR_SUPPORTS_LOCAL_CACHING" -feature_vdi_create = "VDI_CREATE" -feature_vdi_destroy = "VDI_DESTROY" -feature_vdi_attach = "VDI_ATTACH" -feature_vdi_detach = "VDI_DETACH" -feature_vdi_resize = "VDI_RESIZE" -feature_vdi_resize_online = "VDI_RESIZE_ONLINE" -feature_vdi_clone = "VDI_CLONE" -feature_vdi_snapshot = "VDI_SNAPSHOT" -feature_vdi_activate = "VDI_ACTIVATE" -feature_vdi_deactivate = "VDI_DEACTIVATE" -feature_vdi_update = "VDI_UPDATE" -feature_vdi_introduce = "VDI_INTRODUCE" -feature_vdi_generate_config = "VDI_GENERATE_CONFIG" -feature_vdi_reset_on_boot = "VDI_RESET_ON_BOOT" - -# Unmarshals arguments and marshals results (including exceptions) ########## - -class Marshall: - def __init__(self, x): - self.x = x - - def query(self, args): - result = self.x.query() - return value(result) - - def sr_attach(self, args): - result = self.x.sr_attach(args["task"], args["sr"], args["device_config"]) - expect_none(result) - return value(unit) - def sr_detach(self, args): - result = self.x.sr_detach(args["task"], args["sr"]) - expect_none(result) - return value(unit) - def sr_destroy(self, args): - result = self.x.sr_destroy(args["task"], args["sr"]) - 
expect_none(result) - return value(unit) - def sr_scan(self, args): - vis = self.x.sr_scan(args["task"], args["sr"]) - result = [vdi_info(vi) for vi in vis] - return value(vdis(result)) - - def vdi_create(self, args): - vi = self.x.vdi_create(args["task"], args["sr"], vdi_info(args["vdi_info"]), args["params"]) - return value(vdi(vdi_info(vi))) - def vdi_destroy(self, args): - result = self.x.vdi_destroy(args["task"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - - def vdi_attach(self, args): - result = self.x.vdi_attach(args["task"], args["dp"], args["sr"], args["vdi"], args["read_write"]) - expect_string(result) - return value(params(result)) - def vdi_activate(self, args): - result = self.x.vdi_activate(args["task"], args["dp"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - def vdi_deactivate(self, args): - result = self.x.vdi_deactivate(args["task"], args["dp"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - def vdi_detach(self, args): - result = self.x.vdi_detach(args["task"], args["dp"], args["sr"], args["vdi"]) - expect_none(result) - return value(unit) - - - def _dispatch(self, method, params): - try: - log("method = %s params = %s" % (method, repr(params))) - args = params[0] - if method == "query": - return self.query(args) - elif method == "SR.attach": - return self.sr_attach(args) - elif method == "SR.detach": - return self.sr_detach(args) - elif method == "SR.scan": - return self.sr_scan(args) - elif method == "VDI.create": - return self.vdi_create(args) - elif method == "VDI.destroy": - return self.vdi_destroy(args) - elif method == "VDI.attach": - return self.vdi_attach(args) - elif method == "VDI.activate": - return self.vdi_activate(args) - elif method == "VDI.deactivate": - return self.vdi_deactivate(args) - elif method == "VDI.detach": - return self.vdi_detach(args) - except BackendError as e: - log("caught %s" % e) - traceback.print_exc() - return value(backend_error(e.code, 
e.params)) - except Vdi_does_not_exist as e: - log("caught %s" %e) - return value(vdi_does_not_exist()) - except Exception as e: - log("caught %s" % e) - traceback.print_exc() - return value(internal_error(str(e))) - -# Helper function to daemonise ############################################## -def daemonize(): - def fork(): - try: - if os.fork() > 0: - # parent - sys.exit(0) - except Exception as e: - print("fork() failed: %s" % e, file=sys.stderr) - traceback.print_exc() - raise - fork() - os.umask(0) - os.chdir("/") - os.setsid() - fork() - devnull = open("/dev/null", "r") - os.dup2(devnull.fileno(), sys.stdin.fileno()) - devnull = open("/dev/null", "aw") - os.dup2(devnull.fileno(), sys.stdout.fileno()) - os.dup2(devnull.fileno(), sys.stderr.fileno()) - -from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler - -# Server XMLRPC from any HTTP POST path ##################################### - -class RequestHandler(SimpleXMLRPCRequestHandler): - rpc_paths = [] - -# SimpleXMLRPCServer with SO_REUSEADDR ###################################### - -class Server(SimpleXMLRPCServer): - def __init__(self, ip, port): - SimpleXMLRPCServer.__init__(self, (ip, port), requestHandler=RequestHandler) - def server_bind(self): - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - SimpleXMLRPCServer.server_bind(self) - -# This is a hack to patch slow socket.getfqdn calls that -# BaseHTTPServer (and its subclasses) make. 
-# See: http://bugs.python.org/issue6085 -# See: http://www.answermysearches.com/xmlrpc-server-slow-in-python-how-to-fix/2140/ -import http.server - -def _bare_address_string(self): - host, port = self.client_address[:2] - return '%s' % host - -http.server.BaseHTTPRequestHandler.address_string = \ - _bare_address_string - -# Given an implementation, serve requests forever ########################### - -def start(impl, ip, port, daemon): - if daemon: - log("daemonising") - daemonize() - log("will listen on %s:%d" % (ip, port)) - server = Server(ip, port) - log("server registered on %s:%d" % (ip, port)) - server.register_introspection_functions() # for debugging - server.register_instance(Marshall(impl)) - log("serving requests forever") - server.serve_forever() From bda644f95cc19d577290936a459c58aa8c0cd3ed Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 20 Jun 2024 07:20:52 +0000 Subject: [PATCH 129/341] CP-49927: Removed storage.py from scripts/examples Signed-off-by: Ashwinh --- scripts/examples/storage.py | 187 ------------------------------------ 1 file changed, 187 deletions(-) delete mode 100755 scripts/examples/storage.py diff --git a/scripts/examples/storage.py b/scripts/examples/storage.py deleted file mode 100755 index 91214a84db4..00000000000 --- a/scripts/examples/storage.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) Citrix Inc -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -# Example storage backend using SMAPIv2 using raw files and Linux losetup - -# WARNING: this API is considered to be unstable and may be changed at-will - -from __future__ import print_function -import os, sys, subprocess, json - -import smapiv2 -from smapiv2 import log, start, BackendError, Vdi_does_not_exist - -root = "/sr/" - -# [run task cmd] executes [cmd], throwing a BackendError if exits with -# a non-zero exit code. -def run(task, cmd): - code, output = subprocess.getstatusoutput(cmd) - if code != 0: - log("%s: %s exitted with code %d: %s" % (task, cmd, code, output)) - raise BackendError - log("%s: %s" % (task, cmd)) - return output - -# Use Linux "losetup" to create block devices from files -class Loop: - # [_find task path] returns the loop device associated with [path] - def _find(self, task, path): - global root - for line in run(task, "losetup -a").split("\n"): - line = line.strip() - if line != "": - bits = line.split() - loop = bits[0][0:-1] - this_path = bits[2][1:-1] - if this_path == path: - return loop - return None - # [add task path] creates a new loop device for [path] and returns it - def add(self, task, path): - run(task, "losetup -f %s" % path) - return self._find(task, path) - # [remove task path] removes the loop device associated with [path] - def remove(self, task, path): - loop = self._find(task, path) - run(task, "losetup -d %s" % loop) - -# Use FreeBSD "mdconfig" to create block devices from files -class Mdconfig: - # [_find task path] returns the unit (mdX) associated with [path] - def _find(self, task, path): - # md0 vnode 1024M /root/big.img - for line in run(task, "mdconfig -l -v").split("\n"): - if 
line == "": - continue - bits = line.split() - this_path = bits[3] - if this_path == path: - return bits[0] # md0 - return None - # [add task path] returns a block device associated with [path] - def add(self, task, path): - return "/dev/" + run(task, "mdconfig -a -t vnode -f %s" % path) - # [remove task path] removes the block device associated with [path] - def remove(self, task, path): - md = self._find(task, path) - if md: - run(task, "mdconfig -d -u %s" % md) - -# [path_of_vdi vdi] returns the path in the local filesystem corresponding -# to vdi location [vdi] -def path_of_vdi(vdi): - global root - return root + vdi - -disk_suffix = ".raw" -metadata_suffix = ".json" - -class RawFiles: - def __init__(self, device): - self.device = device - - def query(self): - return { "name": "RawFiles", - "vendor": "XCP", - "version": "0.1", - "features": [ smapiv2.feature_vdi_create, - smapiv2.feature_vdi_destroy, - smapiv2.feature_vdi_attach, - smapiv2.feature_vdi_detach, - smapiv2.feature_vdi_activate, - smapiv2.feature_vdi_deactivate ] } - - def sr_attach(self, task, sr, device_config): - if not(os.path.exists(root)): - raise BackendError("SR directory doesn't exist", [ root ]) - def sr_detach(self, task, sr): - pass - def sr_destroy(self, task, sr): - pass - def sr_scan(self, task, sr): - global root - log("scanning") - results = [] - for name in os.listdir(root): - if name.endswith(metadata_suffix): - path = root + "/" + name - f = open(path, "r") - try: - vdi_info = json.loads(f.read()) - results.append(smapiv2.make_vdi_info(vdi_info)) - finally: - f.close() - return results - - def vdi_create(self, task, sr, vdi_info, params): - filename = run(task, "uuidgen") - run(task, "dd if=/dev/zero of=%s%s bs=1 count=0 seek=%s" % (path_of_vdi(filename), disk_suffix, vdi_info["virtual_size"])) - vdi_info["vdi"] = filename - f = open(path_of_vdi(filename) + metadata_suffix, "w") - try: - f.write(json.dumps(vdi_info)) - finally: - f.close() - return vdi_info - def 
vdi_destroy(self, task, sr, vdi): - if not (os.path.exists(path_of_vdi(vdi) + disk_suffix)): - raise Vdi_does_not_exist(vdi) - run(task, "rm -f %s%s" % (path_of_vdi(vdi), disk_suffix)) - run(task, "rm -f %s%s" % (path_of_vdi(vdi), metadata_suffix)) - - def vdi_attach(self, task, dp, sr, vdi, read_write): - path = path_of_vdi(vdi) + disk_suffix - loop = self.device.add(task, path) - log("loop = %s" % repr(loop)) - return loop - - def vdi_activate(self, task, dp, sr, vdi): - pass - def vdi_deactivate(self, task, dp, sr, vdi): - pass - def vdi_detach(self, task, dp, sr, vdi): - path = path_of_vdi(vdi) + disk_suffix - self.device.remove(task, path) - -if __name__ == "__main__": - from optparse import OptionParser - - parser = OptionParser() - parser.add_option("-l", "--log", dest="logfile", help="log to LOG", metavar="LOG") - parser.add_option("-p", "--port", dest="port", help="listen on PORT", metavar="PORT") - parser.add_option("-i", "--ip-addr", dest="ip", help="listen on IP", metavar="IP") - parser.add_option("-d", "--daemon", action="store_true", dest="daemon", help="run as a background daemon", metavar="DAEMON") - (options, args) = parser.parse_args() - if options.logfile: - from smapiv2 import reopenlog - reopenlog(options.logfile) - if not options.ip and not options.ip: - print("Need an --ip-addr and --port. 
Use -h for help", file=sys.stderr) - sys.exit(1) - - ip = options.ip - port = int(options.port) - - arch = run("startup", "uname") - if arch == "Linux": - log("startup: Using loop devices") - start(RawFiles(Loop()), ip, port, options.daemon) - elif arch == "FreeBSD": - log("startup: Using mdconfig devices") - start(RawFiles(Mdconfig()), ip, port, options.daemon) - else: - log("startup: Unknown architecture: %s" % arch) From f387691bee60c8d6789109a95198318f223bff86 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 20 Jun 2024 07:23:54 +0000 Subject: [PATCH 130/341] CP-49927: Removed storage-server from scripts/examples Signed-off-by: Ashwinh --- scripts/examples/storage-server | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100755 scripts/examples/storage-server diff --git a/scripts/examples/storage-server b/scripts/examples/storage-server deleted file mode 100755 index d5d859d9f14..00000000000 --- a/scripts/examples/storage-server +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -. /etc/rc.subr - -name="storageserver" -start_cmd="${name}_start" -stop_cmd=":" - -storageserver_start() -{ - ip=$(ifconfig xn0 | grep inet | cut -f 2 -d " ") - cd /root - /usr/local/bin/python storage.py --ip-addr ${ip} --port 8080 --log /var/log/SMlog --daemon - echo "storageserver started on ${ip}." -} - -load_rc_config $name -run_rc_command "$1" From e95a92bbdbeba4f5dd6adf3cbc62e255eb2a4939 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 20 Jun 2024 13:54:21 +0000 Subject: [PATCH 131/341] CP-49922: Move scripts/probe-device-for-file to python3/libexec Also fix multiple-import on same line and indentation/spacing. 
Signed-off-by: Ashwinh --- python3/Makefile | 1 + .../libexec}/probe-device-for-file | 15 +++++++++------ scripts/Makefile | 1 - 3 files changed, 10 insertions(+), 7 deletions(-) rename {scripts => python3/libexec}/probe-device-for-file (87%) diff --git a/python3/Makefile b/python3/Makefile index e86d5c683c3..2d27b6694fb 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -17,6 +17,7 @@ install: $(IPROG) libexec/usb_reset.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/probe-device-for-file $(DESTDIR)$(LIBEXECDIR) $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/probe-device-for-file b/python3/libexec/probe-device-for-file similarity index 87% rename from scripts/probe-device-for-file rename to python3/libexec/probe-device-for-file index be07f40758f..46882c2cbec 100755 --- a/scripts/probe-device-for-file +++ b/python3/libexec/probe-device-for-file @@ -2,13 +2,16 @@ # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 # Checks for the existence of a file on a device -import os, sys +import os +import sys + try: - import xenfsimage + import xenfsimage except ImportError: - import fsimage as xenfsimage + import fsimage as xenfsimage from contextlib import contextmanager + # https://stackoverflow.com/a/17954769 @contextmanager def stderr_redirected(to=os.devnull): @@ -33,7 +36,7 @@ def stderr_redirected(to=os.devnull): with open(to, 'w') as file: _redirect_stderr(to=file) try: - yield # allow code to be run with the redirected stderr + yield # allow code to be run with the redirected stderr finally: _redirect_stderr(to=old_stderr) # restore stderr. 
# buffering and flags such as @@ -41,8 +44,8 @@ def stderr_redirected(to=os.devnull): if __name__ == "__main__": if len(sys.argv) != 3: - print("Usage: %s " % sys.argv[0]) - sys.exit(2) + print("Usage: %s " % sys.argv[0]) + sys.exit(2) device = sys.argv[1] file = sys.argv[2] try: diff --git a/scripts/Makefile b/scripts/Makefile index 5700d4bd879..5e00c11dfea 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -117,7 +117,6 @@ install: $(IPROG) print-custom-templates $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) - $(IPROG) probe-device-for-file $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)/etc/sysconfig From d4524d07d1d3713ab6df2478405147740ba2160b Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 18 Jun 2024 13:04:30 +0000 Subject: [PATCH 132/341] CP-49910: Move scripts link-vms-by-sr.py from to python3/libexec Signed-off-by: Ashwinh --- python3/Makefile | 1 + {scripts => python3/libexec}/link-vms-by-sr.py | 0 scripts/Makefile | 1 - 3 files changed, 1 insertion(+), 1 deletion(-) rename {scripts => python3/libexec}/link-vms-by-sr.py (100%) diff --git a/python3/Makefile b/python3/Makefile index e86d5c683c3..f05b186d148 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -14,6 +14,7 @@ install: $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ + $(IPROG) libexec/link-vms-by-sr.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/usb_reset.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) diff --git a/scripts/link-vms-by-sr.py b/python3/libexec/link-vms-by-sr.py similarity index 100% rename from scripts/link-vms-by-sr.py rename to python3/libexec/link-vms-by-sr.py diff --git a/scripts/Makefile b/scripts/Makefile index 5700d4bd879..3b468e4e591 100644 --- a/scripts/Makefile +++ 
b/scripts/Makefile @@ -113,7 +113,6 @@ install: $(IPROG) host-display $(DESTDIR)$(LIBEXECDIR) $(IPROG) xe-backup-metadata $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-restore-metadata $(DESTDIR)$(OPTDIR)/bin - $(IPROG) link-vms-by-sr.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) print-custom-templates $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) From 5102a23534aa4cd9da82640a7cbb0aa75e5c3b3f Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 24 Jun 2024 12:00:00 +0200 Subject: [PATCH 133/341] Fix pyright: Set stubPath and fix hfx_filename to work with it Signed-off-by: Bernhard Kaindl --- pyproject.toml | 2 ++ python3/bin/hfx_filename | 2 +- python3/libexec/link-vms-by-sr.py | 4 ++-- python3/stubs/xcp/logger.pyi | 2 ++ 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c711c24c652..74f4adf981e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -194,9 +194,11 @@ disable = [ # typeCheckingMode: set the standard type checking mode include = ["python3", "ocaml/xcp-rrdd"] strict = ["python3/tests/observer"] +stubPath = "python3/stubs" pythonPlatform = "Linux" typeCheckingMode = "standard" reportMissingImports = false +reportMissingModuleSource = false pythonVersion = "3.6" exclude = [ "ocaml/xcp-rrdd/scripts/rrdd/rrdd.py", diff --git a/python3/bin/hfx_filename b/python3/bin/hfx_filename index 28fb05bbc78..616e5921abb 100755 --- a/python3/bin/hfx_filename +++ b/python3/bin/hfx_filename @@ -87,7 +87,7 @@ def read_field(session_id, table, fld, rf): if __name__ == "__main__": xapi = XenAPI.xapi_local() - xapi.xenapi.login_with_password('root', '') + xapi.xenapi.login_with_password("root", "", "1.0", "hfx_filename") session_id = xapi._session try: rf = db_get_by_uuid(session_id, "pool_patch", sys.argv[1]) diff --git a/python3/libexec/link-vms-by-sr.py b/python3/libexec/link-vms-by-sr.py index 98fcfa587ed..e845dd12bfe 100755 --- 
a/python3/libexec/link-vms-by-sr.py +++ b/python3/libexec/link-vms-by-sr.py @@ -86,9 +86,9 @@ def main(): print("Failed to create directory: %s" % linkdir, file=sys.stderr) for vmuuid in list(vms_in_sr[sruuid].keys()): + src = "../../all/{}.vmmeta".format(vmuuid) + targ = "{}/{}.vmmeta".format(linkdir, vmuuid) try: - src = "../../all/{}.vmmeta".format(vmuuid) - targ = "{}/{}.vmmeta".format(linkdir, vmuuid) os.symlink(src, targ) except: print("Failed to create symlink: %s -> %s" % (src, targ), file=sys.stderr) diff --git a/python3/stubs/xcp/logger.pyi b/python3/stubs/xcp/logger.pyi index f4aa2dab371..0b42b05eb47 100644 --- a/python3/stubs/xcp/logger.pyi +++ b/python3/stubs/xcp/logger.pyi @@ -1,6 +1,8 @@ # Minimal stub for xcp.logger module def debug(*al, **ad) -> None: ... +def info(*al, **ad) -> None: ... def error(*al, **ad) -> None: ... def warning(*al, **ad) -> None: ... +def critical(*al, **ad) -> None: ... def logToStdout(level) -> bool: ... def logToSyslog(level) -> bool: ... From 9f9511884812a05db647764513dd92f86558e5d0 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 24 Jun 2024 12:00:00 +0200 Subject: [PATCH 134/341] CP-49910/pylint: Add docstrings and comments for link-vms-by-sr.py Signed-off-by: Bernhard Kaindl --- python3/Makefile | 1 + python3/libexec/link-vms-by-sr.py | 34 ++++++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/python3/Makefile b/python3/Makefile index f05b186d148..01735511460 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -6,6 +6,7 @@ IDATA=install -m 644 SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") install: + # Create destination directories using install -m 755 -d: $(IPROG) -d $(DESTDIR)$(OPTDIR)/bin $(IPROG) -d $(DESTDIR)$(SITE3_DIR) $(IPROG) -d $(DESTDIR)$(LIBEXECDIR) diff --git a/python3/libexec/link-vms-by-sr.py b/python3/libexec/link-vms-by-sr.py index e845dd12bfe..2b5a37a8ba9 100755 --- 
a/python3/libexec/link-vms-by-sr.py +++ b/python3/libexec/link-vms-by-sr.py @@ -1,5 +1,21 @@ #!/usr/bin/env python3 -# Populate a directory of symlinks partitioning VMs by SR +""" +link-vms-by-sr.py - Populate the given input_directory with VM metadata files, + and create a directory structure of + symlinks to the metadata files, partitioning VMs by SR UUID. + +Usage: + link-vms-by-sr.py -d + +The script uses the XenAPI to get a list of VMs in each SR +and get the metadata for each VM, writing the metadata to the input_dir, +and creating symlink directories in the input_dir/by-sr directory. + +Below the input_dir, given by -d : +- In the /all/ directory, store all VM metadata files. +- In the /by-sr/ directory, create symlinks to the VM metadata files, + partitioned by a directory structure of SR UUIDs. +""" # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 from __future__ import print_function @@ -15,18 +31,21 @@ def logout(session): + """atexit handler to logout of the xapi session, ignoring any exceptions""" with contextlib.suppress(Exception): session.xenapi.session.logout() -def get_input_dir(): +def get_input_dir_from_argparser(): + """Parse command line arguments (-d input_dir) and return the input directory""" parser = argparse.ArgumentParser() parser.add_argument("-d", dest="input_dir", required=True, help="Specify the input directory") args = parser.parse_args() return args.input_dir -def get_vms_in_sr(session): +def get_vms_in_sr_from_xapi(session): + """Return a dictionary of SR UUIDs to VM UUIDs""" vms = session.xenapi.VM.get_all_records() vbds = session.xenapi.VBD.get_all_records() vdis = session.xenapi.VDI.get_all_records() @@ -67,13 +86,18 @@ def get_vms_in_sr(session): def main(): + """Main function to save VM metadata files and link them by SR UUID""" + + # Get a session for the local host, login and register a logout handler session = XenAPI.xapi_local() session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-linkvmsbysr.py") 
atexit.register(logout, session) - input_dir = get_input_dir() - vms_in_sr = get_vms_in_sr(session) + # Parse the input directory and get the VMs in each SR + input_dir = get_input_dir_from_argparser() + vms_in_sr = get_vms_in_sr_from_xapi(session) + # Create the directory structure and populate it with symlinks for sruuid in list(vms_in_sr.keys()): linkdir = "{}/by-sr/{}".format(input_dir, sruuid) if Path(linkdir).is_dir(): From 1139c5b840f2b3a898f151c56d7946d907dd1c0a Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 24 Jun 2024 12:00:00 +0200 Subject: [PATCH 135/341] CP-49910/pylint: Fix bare-except: mkdir/symlink raise OSErrors Signed-off-by: Bernhard Kaindl --- python3/libexec/link-vms-by-sr.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python3/libexec/link-vms-by-sr.py b/python3/libexec/link-vms-by-sr.py index 2b5a37a8ba9..0df20b4bf97 100755 --- a/python3/libexec/link-vms-by-sr.py +++ b/python3/libexec/link-vms-by-sr.py @@ -106,7 +106,7 @@ def main(): try: Path(linkdir).mkdir(parents=True) - except: + except OSError: print("Failed to create directory: %s" % linkdir, file=sys.stderr) for vmuuid in list(vms_in_sr[sruuid].keys()): @@ -114,7 +114,7 @@ def main(): targ = "{}/{}.vmmeta".format(linkdir, vmuuid) try: os.symlink(src, targ) - except: + except OSError: print("Failed to create symlink: %s -> %s" % (src, targ), file=sys.stderr) session.xenapi.logout() From 35724f7be4b5b32115e42836aaa00766a394a9f1 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 25 Jun 2024 10:35:03 +0000 Subject: [PATCH 136/341] CP-49907: Moved host-display to python3/libexec directory - Modified Makefile to include host-display in python3 directory Signed-off-by: Ashwinh --- python3/Makefile | 1 + {scripts => python3/libexec}/host-display | 0 scripts/Makefile | 1 - 3 files changed, 1 insertion(+), 1 deletion(-) rename {scripts => python3/libexec}/host-display (100%) diff --git a/python3/Makefile b/python3/Makefile index 52e8eec787e..115933e8d4d 100644 
--- a/python3/Makefile +++ b/python3/Makefile @@ -15,6 +15,7 @@ install: $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ + $(IPROG) libexec/host-display $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/link-vms-by-sr.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/usb_reset.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) diff --git a/scripts/host-display b/python3/libexec/host-display similarity index 100% rename from scripts/host-display rename to python3/libexec/host-display diff --git a/scripts/Makefile b/scripts/Makefile index ed91ab5b866..145ab1fe4cf 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -110,7 +110,6 @@ install: $(IPROG) set-hostname $(DESTDIR)$(LIBEXECDIR) $(IPROG) update-mh-info $(DESTDIR)$(LIBEXECDIR) $(IPROG) host-bugreport-upload $(DESTDIR)$(LIBEXECDIR)/host-bugreport-upload - $(IPROG) host-display $(DESTDIR)$(LIBEXECDIR) $(IPROG) xe-backup-metadata $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-restore-metadata $(DESTDIR)$(OPTDIR)/bin $(IPROG) print-custom-templates $(DESTDIR)$(LIBEXECDIR) From c351b3e7aa44372c1aa4a44664250f545d46034f Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 25 Jun 2024 13:51:45 +0000 Subject: [PATCH 137/341] CP-49921: Moved print-custom-templates from scripts to python3/libexec directory - Modified python3/Makefile to include this change. 
Signed-off-by: Ashwinh --- python3/Makefile | 3 ++- {scripts => python3/libexec}/print-custom-templates | 4 ++-- scripts/Makefile | 1 - 3 files changed, 4 insertions(+), 4 deletions(-) rename {scripts => python3/libexec}/print-custom-templates (90%) diff --git a/python3/Makefile b/python3/Makefile index 52e8eec787e..24a8d7dfbee 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -20,7 +20,8 @@ install: $(IPROG) libexec/usb_scan.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/probe-device-for-file $(DESTDIR)$(LIBEXECDIR) - + $(IPROG) libexec/print-custom-templates $(DESTDIR)$(LIBEXECDIR) + $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/print-custom-templates b/python3/libexec/print-custom-templates similarity index 90% rename from scripts/print-custom-templates rename to python3/libexec/print-custom-templates index 4ae15250951..882dc068732 100755 --- a/scripts/print-custom-templates +++ b/python3/libexec/print-custom-templates @@ -20,8 +20,8 @@ def main(argv): atexit.register(logout, session) templates = session.xenapi.VM.get_all_records_where('field "is_a_template" = "true" and field "is_a_snapshot" = "false"' ) - except: - print("Error retrieving template list", file=sys.stderr) + except Exception as e: + print(type(e).__name__, "retrieving template list:", e, file=sys.stderr) sys.exit(1) output=[] diff --git a/scripts/Makefile b/scripts/Makefile index ed91ab5b866..23cde5f3c65 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -113,7 +113,6 @@ install: $(IPROG) host-display $(DESTDIR)$(LIBEXECDIR) $(IPROG) xe-backup-metadata $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-restore-metadata $(DESTDIR)$(OPTDIR)/bin - $(IPROG) print-custom-templates $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) 
backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) From ced62bd4c52f6ee2a6121c97b40b66db1ecedc7f Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 18 Jun 2024 10:48:53 +0000 Subject: [PATCH 138/341] CP-49920: Moved power-on.py from scripts/poweron to python3/poweron - Modified Makefile to include power-on.py under python3 directory - Removed power-on.py from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 2 ++ {scripts => python3}/poweron/power-on.py | 5 +++-- scripts/Makefile | 2 -- 3 files changed, 5 insertions(+), 4 deletions(-) rename {scripts => python3}/poweron/power-on.py (95%) diff --git a/python3/Makefile b/python3/Makefile index 52e8eec787e..c9311858196 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -25,6 +25,8 @@ install: $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) +# poweron $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan $(IPROG) poweron/DRAC.py $(DESTDIR)$(PLUGINDIR)/DRAC.py + $(IPROG) poweron/power-on.py $(DESTDIR)$(PLUGINDIR)/power-on-host diff --git a/scripts/poweron/power-on.py b/python3/poweron/power-on.py similarity index 95% rename from scripts/poweron/power-on.py rename to python3/poweron/power-on.py index 34fec2f1e60..bad899c343d 100644 --- a/scripts/poweron/power-on.py +++ b/python3/poweron/power-on.py @@ -3,6 +3,7 @@ # Example script which shows how to use the XenAPI to find a particular Host's management interface # and send it a wake-on-LAN packet. 
+import sys import syslog import time @@ -26,8 +27,8 @@ def waitForXapi(session, host): metrics = session.xenapi.host.get_metrics(host) try: finished = session.xenapi.host_metrics.get_live(metrics) - except: - pass + except Exception as e: + print(type(e).__name__, "occurred:", e, file=sys.stderr) return str(finished) diff --git a/scripts/Makefile b/scripts/Makefile index ed91ab5b866..726c4a664ce 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -166,8 +166,6 @@ endif $(IDATA) examples/python/inventory.py $(DESTDIR)$(SITE3_DIR)/ $(IPROG) examples/python/echo.py $(DESTDIR)$(PLUGINDIR)/echo $(IPROG) examples/python/shell.py $(DESTDIR)$(LIBEXECDIR)/shell.py -# poweron - $(IPROG) poweron/power-on.py $(DESTDIR)$(PLUGINDIR)/power-on-host # YUM plugins $(IPROG) yum-plugins/accesstoken.py $(DESTDIR)$(YUMPLUGINDIR) $(IDATA) yum-plugins/accesstoken.conf $(DESTDIR)$(YUMPLUGINCONFDIR) From ef9f1f04bc084647c6e15f939ea07b09a52ce510 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 25 Jun 2024 15:53:28 +0000 Subject: [PATCH 139/341] CP-49909: Moved install-sup-pack from scripts/plugins to python3/plugins directory - fixed bare-except and raise-missing errors Signed-off-by: Ashwinh --- python3/Makefile | 2 ++ {scripts => python3}/plugins/install-supp-pack | 12 ++++++------ scripts/Makefile | 1 - 3 files changed, 8 insertions(+), 7 deletions(-) rename {scripts => python3}/plugins/install-supp-pack (90%) diff --git a/python3/Makefile b/python3/Makefile index 4f625d264be..4117ad4fc35 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -27,6 +27,8 @@ install: $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) + # poweron $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wake-on-lan diff --git a/scripts/plugins/install-supp-pack b/python3/plugins/install-supp-pack 
similarity index 90% rename from scripts/plugins/install-supp-pack rename to python3/plugins/install-supp-pack index 8143215c4b2..83db0303186 100755 --- a/scripts/plugins/install-supp-pack +++ b/python3/plugins/install-supp-pack @@ -36,8 +36,8 @@ def install(session, args): vdi_ref = None try: vdi_ref = session.xenapi.VDI.get_by_uuid(vdi) - except: - raise ArgumentError("VDI parameter invalid") + except Exception as exc: + raise ArgumentError("VDI parameter invalid") from exc inventory = xcp.environ.readInventory() this_host_uuid = inventory["INSTALLATION_UUID"] @@ -46,8 +46,8 @@ def install(session, args): update_ref = None try: update_ref = session.xenapi.pool_update.introduce(vdi_ref) - except: - raise ArgumentError("VDI contains invalid update package") + except Exception as exc: + raise ArgumentError("VDI contains invalid update package") from exc try: session.xenapi.pool_update.apply(update_ref, this_host_ref) @@ -57,9 +57,9 @@ def install(session, args): # "['ERRORCODE', 'error_message']" # fetch the error_message and display it. 
error = json.loads(str(e))[1].encode("utf8") - except: + except Exception: error = str(e) - raise InstallFailure("Failed to install the supplemental pack", error) + raise InstallFailure("Failed to install the supplemental pack", error) from e return "OK" diff --git a/scripts/Makefile b/scripts/Makefile index ffcee71e5d5..434d0819de1 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -126,7 +126,6 @@ install: $(IPROG) plugins/perfmon $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/firewall-port $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/openvswitch-config-update $(DESTDIR)$(PLUGINDIR) mkdir -p $(DESTDIR)$(HOOKSDIR)/host-post-declare-dead From f80b1260de9a2ddd15b22b1bbf627e0950365946 Mon Sep 17 00:00:00 2001 From: Ashwin Date: Thu, 27 Jun 2024 04:29:48 +0530 Subject: [PATCH 140/341] CP-49903: mv echo plugin from scripts/examples/python/echo.py to python3/plugins (#5742) Signed-off-by: Ashwinh Co-authored-by: Bernhard Kaindl --- python3/Makefile | 1 + {scripts/examples/python => python3/plugins}/echo.py | 4 ++-- scripts/Makefile | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) rename {scripts/examples/python => python3/plugins}/echo.py (74%) diff --git a/python3/Makefile b/python3/Makefile index 4117ad4fc35..8f34cb8e107 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -28,6 +28,7 @@ install: $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/echo.py $(DESTDIR)$(PLUGINDIR)/echo # poweron $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py diff --git a/scripts/examples/python/echo.py b/python3/plugins/echo.py similarity index 74% rename from scripts/examples/python/echo.py rename to python3/plugins/echo.py index 57f70492c6c..27020e17065 100644 --- 
a/scripts/examples/python/echo.py +++ b/python3/plugins/echo.py @@ -5,8 +5,8 @@ import XenAPIPlugin - -def main(session, args): +# The 1st argument is the session. This plugin does not use it, hence use _: +def main(_, args): if "sleep" in args: secs = int(args["sleep"]) time.sleep(secs) diff --git a/scripts/Makefile b/scripts/Makefile index 434d0819de1..705b161158a 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -161,7 +161,6 @@ endif sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ $(IDATA) examples/python/inventory.py $(DESTDIR)$(SITE3_DIR)/ - $(IPROG) examples/python/echo.py $(DESTDIR)$(PLUGINDIR)/echo $(IPROG) examples/python/shell.py $(DESTDIR)$(LIBEXECDIR)/shell.py # YUM plugins $(IPROG) yum-plugins/accesstoken.py $(DESTDIR)$(YUMPLUGINDIR) From d6ac6a6f85757fc087d2a0863dfcddaa9f29f242 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 26 Jun 2024 12:00:00 +0200 Subject: [PATCH 141/341] CP-49928: test_static_vids.py: mv to py3, test list_vdis & fresh_name Signed-off-by: Bernhard Kaindl --- python3/tests/test_static_vdis.py | 85 +++++++++++++++++++++++++++++++ scripts/test_static_vdis.py | 56 -------------------- 2 files changed, 85 insertions(+), 56 deletions(-) create mode 100644 python3/tests/test_static_vdis.py delete mode 100644 scripts/test_static_vdis.py diff --git a/python3/tests/test_static_vdis.py b/python3/tests/test_static_vdis.py new file mode 100644 index 00000000000..ee424c157a1 --- /dev/null +++ b/python3/tests/test_static_vdis.py @@ -0,0 +1,85 @@ +"""python3/tests/test_static_vdis.py: Test the static-vdis script""" + +import os +from pathlib import Path +from types import ModuleType + +import pytest + +from python3.tests.import_helper import import_file_as_module, mocked_modules + +# ---------------------------- Test fixtures --------------------------------- + + +@pytest.fixture(scope="function") # function scope: 
Re-run for each test function +def static_vdis() -> ModuleType: + """Test fixture to return the static-vdis module, mocked to avoid dependencies.""" + with mocked_modules("XenAPI", "inventory"): + return import_file_as_module("scripts/static-vdis") + + +# Hide pylint warnings for redefined-outer-name from using the static_vdis fixture: +# pylint: disable=redefined-outer-name +# Allow to access attributes of the static_vdis module from this test module: +# pyright: reportAttributeAccessIssue=false + +# ----------------------------- Test cases ----------------------------------- + + +def test_whole_file(static_vdis: ModuleType): + """Test read_whole_file() and write_whole_file()""" + + with open(__file__, encoding="utf-8") as data: + contents = data.read().strip() + assert static_vdis.read_whole_file(__file__) == contents + assert static_vdis.write_whole_file(__file__, contents) is None + with open(__file__, encoding="utf-8") as written_data: + assert written_data.read().strip() == contents + + +def test_fresh_name(static_vdis: ModuleType, tmp_path: Path): + """Test fresh_name() and list_vdis() - all code paths""" + + # When the freshly created tmp_path is empty, expect [] and "0": + static_vdis.main_dir = tmp_path.as_posix() + assert static_vdis.list_vdis() == [] + assert static_vdis.fresh_name() == "0" + + # When main_dir contains a directory with name "0", the next name should be "1": + os.mkdir(static_vdis.main_dir + "/0") + assert static_vdis.fresh_name() == "1" + + # When main_dir contains a directory with name "1", the next name should be "2": + os.mkdir(static_vdis.main_dir + "/1") + assert static_vdis.fresh_name() == "2" + + # When main_dir does not exist, an empty list and 0 should be returned: + static_vdis.main_dir = tmp_path.as_posix() + "/does-not-exist" + assert static_vdis.list_vdis() == [] + assert static_vdis.fresh_name() == "0" + + + +def test_sr_attach(static_vdis: ModuleType, mocker): + """Test sr_attach()""" + + # We need to mock those as they 
would attempt to load the volume plugin and + # check the clusterstack, which are not available in the test environment: + static_vdis.call_volume_plugin = mocker.MagicMock() + static_vdis.check_clusterstack = mocker.MagicMock() + + # Set the return value of the mocked functions to success: + static_vdis.call_volume_plugin.return_value = "success" + static_vdis.check_clusterstack.return_value = "success" + + # Call the sr_attach function + device_config = {"key1": "value1", "key2": "value2"} + result = static_vdis.sr_attach("plugin_name", device_config) + + # Assert the expected behavior + assert result == "success" + static_vdis.call_volume_plugin.assert_called_once_with( + "plugin_name", + "SR.attach", + ["--configuration", "key1", "value1", "--configuration", "key2", "value2"], + ) \ No newline at end of file diff --git a/scripts/test_static_vdis.py b/scripts/test_static_vdis.py deleted file mode 100644 index b0ab6ad5939..00000000000 --- a/scripts/test_static_vdis.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# -# unittest for static-vdis - -import unittest -from mock import MagicMock -import sys -import os -import subprocess -import tempfile - -# mock modules to avoid dependencies -sys.modules["XenAPI"] = MagicMock() -sys.modules["inventory"] = MagicMock() - -def import_from_file(module_name, file_path): - """Import a file as a module""" - if sys.version_info.major == 2: - return None - else: - from importlib import machinery, util - loader = machinery.SourceFileLoader(module_name, file_path) - spec = util.spec_from_loader(module_name, loader) - assert spec - assert spec.loader - module = util.module_from_spec(spec) - # Probably a good idea to add manually imported module stored in sys.modules - sys.modules[module_name] = module - spec.loader.exec_module(module) - return module - -def get_module(): - """Import the static-vdis script as a module for executing unit tests on functions""" - testdir = os.path.dirname(__file__) - return 
import_from_file("static_vdis", testdir + "/static-vdis") - -static_vdis = get_module() - -@unittest.skipIf(sys.version_info < (3, 0), reason="requires python3") -class TestReadWriteFile(unittest.TestCase): - def test_write_and_read_whole_file(self): - """Test read_whole_file and write_whole_file""" - test_file = tempfile.NamedTemporaryFile(delete=True) - filename = str(test_file.name) - content = r"""def read_whole_file(filename): - with open(filename, 'r', encoding='utf-8') as f: - return ''.join(f.readlines()).strip() - -def write_whole_file(filename, contents): - with open(filename, "w", encoding='utf-8') as f: - f.write(contents)""" - static_vdis.write_whole_file(filename, content) - expected_content = static_vdis.read_whole_file(filename) - self.assertEqual(expected_content, content) - - \ No newline at end of file From a02153b6f81500ec26e82d7f615f3d9601764197 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 25 Jun 2024 12:00:00 +0200 Subject: [PATCH 142/341] CP-49928: Fix pytype warnings in scripts/static-vdis Signed-off-by: Bernhard Kaindl --- pyproject.toml | 2 +- scripts/static-vdis | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5bf10170401..83a54c6d978 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,7 @@ exclude_lines = [ "pass", # Other specific lines that do not need to be covered, comment in which file: "raise NbdDeviceNotFound", # python3/libexec/usb_scan.py + "params = xmlrpc.client.loads", # static-vdis ] # precision digits to use when reporting coverage (sub-percent-digits are not reported): precision = 0 @@ -261,7 +262,6 @@ expected_to_fail = [ "scripts/backup-sr-metadata.py", "scripts/restore-sr-metadata.py", # Other fixes needed: - "scripts/static-vdis", "scripts/plugins/extauth-hook-AD.py", ] diff --git a/scripts/static-vdis b/scripts/static-vdis index 77c9790b71e..9ca8b1d352a 100755 --- a/scripts/static-vdis +++ b/scripts/static-vdis @@ 
-3,10 +3,22 @@ # Common functions for managing statically-attached (ie onboot, without xapi) VDIs -import sys, os, subprocess, json, urllib.parse +import json +import os import os.path +import subprocess +import sys import time -import XenAPI, inventory, xmlrpc.client +import urllib.parse +import xmlrpc.client +from typing import TYPE_CHECKING + +import XenAPI + +import inventory + +if TYPE_CHECKING: + from typing import Any, Dict main_dir = "/etc/xensource/static-vdis" @@ -77,6 +89,7 @@ def check_clusterstack(ty): wait_for_corosync_quorum() def sr_attach(ty, device_config): + # type: (str, Dict[str, object]) -> str check_clusterstack(ty) args = [arg for (k,v) in device_config.items() @@ -238,7 +251,7 @@ def call_backend_attach(driver, config): return path def call_backend_detach(driver, config): - params = xmlrpc.client.loads(config)[0][0] + params = xmlrpc.client.loads(config)[0][0] # type: Any params['command'] = 'vdi_detach_from_config' config = xmlrpc.client.dumps(tuple([params]), params['command']) xml = doexec([ driver, config ]) @@ -388,4 +401,3 @@ if __name__ == "__main__": detach(sys.argv[2]) else: usage() - From 49ed5e62028befa407e7d88387b50eae5bf544ab Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 27 Jun 2024 09:28:56 +0000 Subject: [PATCH 143/341] CP-49911: Removed lvhd-api-test.py from scripts/examples/python/ Signed-off-by: Ashwinh --- scripts/examples/python/lvhd-api-test.py | 29 ------------------------ 1 file changed, 29 deletions(-) delete mode 100644 scripts/examples/python/lvhd-api-test.py diff --git a/scripts/examples/python/lvhd-api-test.py b/scripts/examples/python/lvhd-api-test.py deleted file mode 100644 index 4b7786d3f27..00000000000 --- a/scripts/examples/python/lvhd-api-test.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import print_function -import XenAPI, sys - -def go(x, name): - vm = x.xenapi.VM.get_by_name_label(name)[0] - vbds = x.xenapi.VM.get_VBDs(vm) - non_empty = filter(lambda 
y:not(x.xenapi.VBD.get_empty(y)), vbds) - vdis = map(lambda y:x.xenapi.VBD.get_VDI(y), non_empty) - - print("Calling API call on %s" % (repr(vdis))) - result = x.xenapi.SR.lvhd_stop_using_these_vdis_and_call_script(vdis, "echo", "main", { "hello": "there", "sleep": "10" }) - print(repr(result)) - - -if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage:", file=sys.stderr) - print(" %s " % (sys.argv[0]), file=sys.stderr) - print(" -- Call SR.lvhd_stop_using_these_vdis_and_call_script with all VDIs with VBDs (attached or not) linking to specified VM", file=sys.stderr) - sys.exit(1) - name = sys.argv[1] - x = XenAPI.xapi_local() - x.xenapi.login_with_password("root", "", "1.0", "xen-api-scripts-lvhd-api-test.py") - try: - go(x, name) - finally: - x.xenapi.logout() From 39a5384c7f7cc5a62c8c9beef9348aebc8c8c49c Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 27 Jun 2024 10:58:42 +0000 Subject: [PATCH 144/341] CP-50091: Moved inventory.py from scripts/examples/python/ to python3/packages/inventory.py - Fixed bare-except, unspecified-encoding and indentation issue Signed-off-by: Ashwinh --- python3/Makefile | 1 + python3/packages/inventory.py | 37 ++++++++++++++++++++++++++++ scripts/Makefile | 2 -- scripts/examples/python/inventory.py | 32 ------------------------ 4 files changed, 38 insertions(+), 34 deletions(-) create mode 100644 python3/packages/inventory.py delete mode 100644 scripts/examples/python/inventory.py diff --git a/python3/Makefile b/python3/Makefile index 8f34cb8e107..91479a31d8d 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -13,6 +13,7 @@ install: $(IPROG) -d $(DESTDIR)$(PLUGINDIR) + $(IDATA) packages/inventory.py $(DESTDIR)$(SITE3_DIR)/ $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ $(IPROG) libexec/host-display $(DESTDIR)$(LIBEXECDIR) diff --git a/python3/packages/inventory.py b/python3/packages/inventory.py new file mode 100644 index 00000000000..87847cf5cde --- /dev/null +++ b/python3/packages/inventory.py @@ -0,0 
+1,37 @@ +#!/usr/bin/env python3 + +""" +inventory.py + +This module defines functions to read and parse constants from the xensource-inventory file. +""" +import sys + +INVENTORY = "@INVENTORY@" +INSTALLATION_UUID = "INSTALLATION_UUID" + + +def read_kvpairs(filename): + """Read in a file of key-value pairs in the format used by the inventory file""" + all_entries = {} + with open(filename, 'r', encoding='utf-8') as f: + for line in f: + equals = line.index("=") + key = line[:equals] + value = line[equals+1:].strip().strip("'") + all_entries[key] = value + return all_entries + + +def parse(): + """Return the contents of the xensource inventory file as a dictionary""" + try: + return read_kvpairs(INVENTORY) + except FileNotFoundError as e: + print("Error: File '{}' not found. {}".format(INVENTORY, e), file=sys.stderr) + return {} + + +def get_localhost_uuid(): + """Return the UUID of the local host""" + return parse()[INSTALLATION_UUID] diff --git a/scripts/Makefile b/scripts/Makefile index 705b161158a..8204a2a0e66 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -155,12 +155,10 @@ install: ifneq ($(BUILD_PY2), NO) $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE_DIR)/ $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE_DIR)/ - $(IDATA) examples/python/inventory.py $(DESTDIR)$(SITE_DIR)/ endif $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE3_DIR)/ sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ - $(IDATA) examples/python/inventory.py $(DESTDIR)$(SITE3_DIR)/ $(IPROG) examples/python/shell.py $(DESTDIR)$(LIBEXECDIR)/shell.py # YUM plugins $(IPROG) yum-plugins/accesstoken.py $(DESTDIR)$(YUMPLUGINDIR) diff --git a/scripts/examples/python/inventory.py b/scripts/examples/python/inventory.py deleted file mode 100644 index 9fd645b5d32..00000000000 --- a/scripts/examples/python/inventory.py +++ /dev/null @@ -1,32 +0,0 @@ -# Simple 
functions to read the constants from the xensource-inventory file - -INVENTORY="@INVENTORY@" -INSTALLATION_UUID="INSTALLATION_UUID" - - -def read_kvpairs(filename): - """Read in a file of key-value pairs in the format used by the inventory file""" - f = open(filename) - all_entries = {} - try: - for line in f.readlines(): - equals = line.index("=") - key = line[0:equals] - value = line[equals+1:].strip().strip("'") - all_entries[key] = value - finally: - f.close() - return all_entries - - -def parse(): - """Return the contents of the xensource inventory file as a dictionary""" - try: - return read_kvpairs(INVENTORY) - except: - return {} - - -def get_localhost_uuid(): - """Return the UUID of the local host""" - return parse()[INSTALLATION_UUID] From 1cd2fa5857acbf2bbd1e929385fefd2b779785c8 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 1 Jul 2024 10:01:54 +0000 Subject: [PATCH 145/341] CP-49900: Moved scripts/templates to python3/templates directory - Modified code to using 2to3 - Fixed except issue - Removed templates from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 3 + {scripts => python3}/templates/debian | 83 ++++++++++++++++++--------- {scripts => python3}/templates/debug | 0 scripts/Makefile | 3 - 4 files changed, 58 insertions(+), 31 deletions(-) rename {scripts => python3}/templates/debian (69%) rename {scripts => python3}/templates/debug (100%) diff --git a/python3/Makefile b/python3/Makefile index 8f34cb8e107..3f724d972c7 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -29,6 +29,9 @@ install: $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/echo.py $(DESTDIR)$(PLUGINDIR)/echo +# templates + $(IPROG) templates/debian $(DESTDIR)$(OPTDIR)/packages/post-install-scripts/debian-etch + $(IPROG) templates/debug $(DESTDIR)$(OPTDIR)/packages/post-install-scripts # poweron $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py diff --git 
a/scripts/templates/debian b/python3/templates/debian similarity index 69% rename from scripts/templates/debian rename to python3/templates/debian index 9350a40a57d..4e9b12a8714 100644 --- a/scripts/templates/debian +++ b/python3/templates/debian @@ -1,30 +1,46 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2005-2007 XenSource, Inc # Code ripped out of 'xgt' script for now from __future__ import print_function -import commands, xmlrpclib, os, sys, httplib, socket, urllib2, signal + +import os +import signal +import socket +import sys + +import commands +import httplib +import urllib2 +import xmlrpclib verbose = True + ##### begin hack. Provide xmlrpc over UNIX domain socket (cut+pasted from eliloader): class UDSHTTPConnection(httplib.HTTPConnection): - """ Stupid hacked up HTTPConnection subclass to allow HTTP over Unix domain - sockets. """ + """Stupid hacked up HTTPConnection subclass to allow HTTP over Unix domain + sockets.""" + def connect(self): path = self.host.replace("_", "/") self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.connect(path) + class UDSHTTP(httplib.HTTP): _connection_class = UDSHTTPConnection + class UDSTransport(xmlrpclib.Transport): def make_connection(self, host): return UDSHTTP(host) + def xapi_local(): return xmlrpclib.Server("http://_var_xapi_xapi/", transport=UDSTransport()) + + ##### end hack. @@ -36,43 +52,47 @@ def run(cmd, *args): debug("+ " + cmd % args) (ret, out) = commands.getstatusoutput(cmd % args) if verbose: - try: + try: for line in out.split("\n"): log("| " + line) except TypeError as e: pass if ret != 0: - debug ("run - command %s failed with %d" , cmd, ret) + debug("run - command %s failed with %d", cmd, ret) raise CommandException(out) return out + def log(fmt, *args): print(fmt % args) + def debug(msg, *args): if verbose: print(msg % args) + def create_partition(lvpath): # 1. 
write a partition table: - pipe = os.popen('/sbin/fdisk %s' % lvpath, 'w') + pipe = os.popen("/sbin/fdisk %s" % lvpath, "w") - pipe.write('n\n') # new partition - pipe.write('p\n') # primary - pipe.write("1\n") # 1st partition - pipe.write('\n') # default start cylinder - pipe.write('\n') # size: as big as image - pipe.write('w\n') # write partition table + pipe.write("n\n") # new partition + pipe.write("p\n") # primary + pipe.write("1\n") # 1st partition + pipe.write("\n") # default start cylinder + pipe.write("\n") # size: as big as image + pipe.write("w\n") # write partition table # XXX we must ignore certain errors here as fdisk will # sometimes return non-zero signalling error conditions # we don't care about. Should fix to detect these cases # specifically. rc = pipe.close() - if rc == None: + if rc == None: rc = 0 log("fdisk exited with rc %d (some non-zero exits can be ignored safely)." % rc) + def map_partitions(lvpath): run("/sbin/kpartx -a %s", lvpath) ps = [] @@ -80,37 +100,42 @@ def map_partitions(lvpath): ps.append("/dev/mapper/" + line.split()[0]) return ps + def unmap_partitions(lvpath): run("/sbin/kpartx -d %s", lvpath) + def umount(mountpoint): - run("umount -l %s",mountpoint) + run("umount -l %s", mountpoint) + if __name__ == "__main__": - #os.setpgrp() + # os.setpgrp() xvda = os.getenv("xvda") xvdb = os.getenv("xvdb") debug("Guest's xvda is on %s" % xvda) debug("Guest's xvdb is on %s" % xvdb) if xvda == None or xvdb == None: - raise "Need to pass in device names for xvda and xvdb through the environment" - + raise ValueError ("Need to pass in device names for xvda and xvdb through the environment") + vm = os.getenv("vm") - server = xapi_local () + server = xapi_local() try: - session_id = server.session.login_with_password('','','1.0','xen-api-scripts-debian')['Value'] - uuid = server.VM.get_uuid(session_id, vm)['Value'] + session_id = server.session.login_with_password( + "", "", "1.0", "xen-api-scripts-debian" + )["Value"] + uuid = 
server.VM.get_uuid(session_id, vm)["Value"] mountpoint = "/tmp/installer/%s" % (uuid) finally: server.session.logout(session_id) def sighandler(signum, frame): - umount(mountpoint) - os.killpg(0,signal.SIGKILL) - exit(1) + umount(mountpoint) + os.killpg(0, signal.SIGKILL) + exit(1) - signal.signal(signal.SIGTERM,sighandler) + signal.signal(signal.SIGTERM, sighandler) create_partition(xvda) create_partition(xvdb) @@ -132,10 +157,12 @@ if __name__ == "__main__": run("/usr/bin/unzip -p %s swap.img | dd of=%s oflag=direct bs=1M", xgt, xvdb) try: - session_id = server.session.login_with_password('','','1.0','xen-api-scripts-debian')['Value'] - vbds = server.VM.get_VBDs(session_id, vm)['Value'] + session_id = server.session.login_with_password( + "", "", "1.0", "xen-api-scripts-debian" + )["Value"] + vbds = server.VM.get_VBDs(session_id, vm)["Value"] for i in vbds: - dev = server.VBD.get_userdevice(session_id, i)['Value'] + dev = server.VBD.get_userdevice(session_id, i)["Value"] if dev == "0": server.VBD.set_bootable(session_id, i, True) finally: diff --git a/scripts/templates/debug b/python3/templates/debug similarity index 100% rename from scripts/templates/debug rename to python3/templates/debug diff --git a/scripts/Makefile b/scripts/Makefile index 705b161158a..b068e7a8959 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -145,9 +145,6 @@ install: mkdir -p $(DESTDIR)/etc/cron.d $(IDATA) xapi-tracing-log-trim.cron $(DESTDIR)/etc/cron.d/xapi-tracing-log-trim.cron mkdir -p $(DESTDIR)/opt/xensource/gpg -# templates - $(IPROG) templates/debian $(DESTDIR)$(OPTDIR)/packages/post-install-scripts/debian-etch - $(IPROG) templates/debug $(DESTDIR)$(OPTDIR)/packages/post-install-scripts # host-backup-restore $(IPROG) host-backup-restore/host-backup $(DESTDIR)$(LIBEXECDIR) $(IPROG) host-backup-restore/host-restore $(DESTDIR)$(LIBEXECDIR) From 8199f2d3bf1569c651f76c3c6e273869716ca9ba Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 1 Jul 2024 10:54:29 +0000 Subject: [PATCH 
146/341] CP-49912: Moved scripts/mail-alarm to python3/libexec directory - Modified python3/Makefile to include these changes Signed-off-by: Ashwinh --- python3/Makefile | 2 ++ {scripts => python3/libexec}/mail-alarm | 0 scripts/Makefile | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) rename {scripts => python3/libexec}/mail-alarm (100%) diff --git a/python3/Makefile b/python3/Makefile index 8f34cb8e107..1d98e2223ba 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -22,6 +22,8 @@ install: $(IPROG) libexec/nbd_client_manager.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/probe-device-for-file $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/print-custom-templates $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/mail-alarm $(DESTDIR)$(LIBEXECDIR) + $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/mail-alarm b/python3/libexec/mail-alarm similarity index 100% rename from scripts/mail-alarm rename to python3/libexec/mail-alarm diff --git a/scripts/Makefile b/scripts/Makefile index 705b161158a..88d71c98227 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -42,7 +42,6 @@ install: $(IPROG) fence $(DESTDIR)$(LIBEXECDIR) $(IPROG) xha-lc $(DESTDIR)$(LIBEXECDIR) $(IPROG) xapi-health-check $(DESTDIR)$(LIBEXECDIR) - $(IPROG) mail-alarm $(DESTDIR)$(LIBEXECDIR) $(IDATA) audit-logrotate $(DESTDIR)/etc/logrotate.d/audit $(IDATA) xapi-logrotate.conf $(DESTDIR)/etc/logrotate.d/xapi $(IPROG) xapi-tracing-log-trim.sh $(DESTDIR)$(LIBEXECDIR) From 501cc2630dc78b3e00b3f9ee8e38646495974620 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 1 Jul 2024 12:00:00 +0200 Subject: [PATCH 147/341] CP-49928: static-vdis: Fix warnings in def fresh_name() Warning fix (for pyright) in scripts/static-vdis: - fresh_name is covered by github.com/xapi-project/xen-api/pull/5740 Details: Always return a value in fresh_name(): - Was already so, but pyright doesn't "understand" this. 
- Fix the warning by de-indenting `return 0` of fresh_name() Signed-off-by: Bernhard Kaindl --- scripts/static-vdis | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/static-vdis b/scripts/static-vdis index 9ca8b1d352a..6ea02211e0d 100755 --- a/scripts/static-vdis +++ b/scripts/static-vdis @@ -115,7 +115,7 @@ def fresh_name(): except: # Directory doesn't exist os.mkdir(main_dir) - return "0" + return "0" # Always return a string, fixes pyright error by not returning None def to_string_list(d): From 017ad4939d11d940140706fb8ae20287bfff7d9b Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 1 Jul 2024 12:00:00 +0200 Subject: [PATCH 148/341] CP-49928: static-vdis: Fix pylint warnings in def fresh_name() - all does not need to be initialized to an empty list: It isn't used before it is set by `all = listdir(...)` and not after. - Use `if i not in files:` not some clumsy method that pylint warns on. Signed-off-by: Bernhard Kaindl --- scripts/static-vdis | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/static-vdis b/scripts/static-vdis index 6ea02211e0d..fada0e14f1f 100755 --- a/scripts/static-vdis +++ b/scripts/static-vdis @@ -105,12 +105,12 @@ def list_vdis(): return list(map(load, all)) def fresh_name(): - all = [] + """Return a unique name for a new static VDI configuration directory""" try: all = os.listdir(main_dir) for i in range(0, len(all) + 1): # guarantees to find a unique number i = str(i) - if not(i in all): + if i not in all: return i except: # Directory doesn't exist From ef1bb5f9155d15650a16dccf6b4c46440e4677c5 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 1 Jul 2024 12:00:00 +0200 Subject: [PATCH 149/341] CP-49928: Rename all to not have warnings on overriding built-in Signed-off-by: Bernhard Kaindl --- scripts/static-vdis | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/static-vdis b/scripts/static-vdis index fada0e14f1f..5a9b60b77be 100755 --- 
a/scripts/static-vdis +++ b/scripts/static-vdis @@ -97,20 +97,20 @@ def sr_attach(ty, device_config): return call_volume_plugin(ty, "SR.attach", args) def list_vdis(): - all = [] + files = [] try: - all = os.listdir(main_dir) + files = os.listdir(main_dir) except: pass - return list(map(load, all)) + return list(map(load, files)) def fresh_name(): """Return a unique name for a new static VDI configuration directory""" try: - all = os.listdir(main_dir) - for i in range(0, len(all) + 1): # guarantees to find a unique number + files = os.listdir(main_dir) + for i in range(0, len(files) + 1): # guarantees to find a unique number i = str(i) - if i not in all: + if i not in files: return i except: # Directory doesn't exist From b606836f9064616984261b3e5c06037ec2399c65 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 1 Jul 2024 12:00:00 +0200 Subject: [PATCH 150/341] CP-49928: listdir: except OSError: Possible errors are subclasses of it Signed-off-by: Bernhard Kaindl --- scripts/static-vdis | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/static-vdis b/scripts/static-vdis index 5a9b60b77be..ec24848e934 100755 --- a/scripts/static-vdis +++ b/scripts/static-vdis @@ -100,7 +100,7 @@ def list_vdis(): files = [] try: files = os.listdir(main_dir) - except: + except OSError: # All possible errors are subclasses of OSError pass return list(map(load, files)) @@ -112,7 +112,7 @@ def fresh_name(): i = str(i) if i not in files: return i - except: + except OSError: # All possible errors are subclasses of OSError # Directory doesn't exist os.mkdir(main_dir) return "0" # Always return a string, fixes pyright error by not returning None From f225cc47da391a5b70a8086f07e8de9b9464b6da Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 2 Jul 2024 12:01:05 +0200 Subject: [PATCH 151/341] CP-49928: test_static_vids.py: mv to py3, test list_vdis & fresh_name (#5740) Signed-off-by: Bernhard Kaindl --- python3/tests/test_static_vdis.py | 85 
+++++++++++++++++++++++++++++++ scripts/test_static_vdis.py | 56 -------------------- 2 files changed, 85 insertions(+), 56 deletions(-) create mode 100644 python3/tests/test_static_vdis.py delete mode 100644 scripts/test_static_vdis.py diff --git a/python3/tests/test_static_vdis.py b/python3/tests/test_static_vdis.py new file mode 100644 index 00000000000..ee424c157a1 --- /dev/null +++ b/python3/tests/test_static_vdis.py @@ -0,0 +1,85 @@ +"""python3/tests/test_static_vdis.py: Test the static-vdis script""" + +import os +from pathlib import Path +from types import ModuleType + +import pytest + +from python3.tests.import_helper import import_file_as_module, mocked_modules + +# ---------------------------- Test fixtures --------------------------------- + + +@pytest.fixture(scope="function") # function scope: Re-run for each test function +def static_vdis() -> ModuleType: + """Test fixture to return the static-vdis module, mocked to avoid dependencies.""" + with mocked_modules("XenAPI", "inventory"): + return import_file_as_module("scripts/static-vdis") + + +# Hide pylint warnings for redefined-outer-name from using the static_vdis fixture: +# pylint: disable=redefined-outer-name +# Allow to access attributes of the static_vdis module from this test module: +# pyright: reportAttributeAccessIssue=false + +# ----------------------------- Test cases ----------------------------------- + + +def test_whole_file(static_vdis: ModuleType): + """Test read_whole_file() and write_whole_file()""" + + with open(__file__, encoding="utf-8") as data: + contents = data.read().strip() + assert static_vdis.read_whole_file(__file__) == contents + assert static_vdis.write_whole_file(__file__, contents) is None + with open(__file__, encoding="utf-8") as written_data: + assert written_data.read().strip() == contents + + +def test_fresh_name(static_vdis: ModuleType, tmp_path: Path): + """Test fresh_name() and list_vdis() - all code paths""" + + # When the freshly created tmp_path is empty, 
expect [] and "0": + static_vdis.main_dir = tmp_path.as_posix() + assert static_vdis.list_vdis() == [] + assert static_vdis.fresh_name() == "0" + + # When main_dir contains a directory with name "0", the next name should be "1": + os.mkdir(static_vdis.main_dir + "/0") + assert static_vdis.fresh_name() == "1" + + # When main_dir contains a directory with name "1", the next name should be "2": + os.mkdir(static_vdis.main_dir + "/1") + assert static_vdis.fresh_name() == "2" + + # When main_dir does not exist, an empty list and 0 should be returned: + static_vdis.main_dir = tmp_path.as_posix() + "/does-not-exist" + assert static_vdis.list_vdis() == [] + assert static_vdis.fresh_name() == "0" + + + +def test_sr_attach(static_vdis: ModuleType, mocker): + """Test sr_attach()""" + + # We need to mock those as they would attempt to load the volume plugin and + # check the clusterstack, which are not available in the test environment: + static_vdis.call_volume_plugin = mocker.MagicMock() + static_vdis.check_clusterstack = mocker.MagicMock() + + # Set the return value of the mocked functions to success: + static_vdis.call_volume_plugin.return_value = "success" + static_vdis.check_clusterstack.return_value = "success" + + # Call the sr_attach function + device_config = {"key1": "value1", "key2": "value2"} + result = static_vdis.sr_attach("plugin_name", device_config) + + # Assert the expected behavior + assert result == "success" + static_vdis.call_volume_plugin.assert_called_once_with( + "plugin_name", + "SR.attach", + ["--configuration", "key1", "value1", "--configuration", "key2", "value2"], + ) \ No newline at end of file diff --git a/scripts/test_static_vdis.py b/scripts/test_static_vdis.py deleted file mode 100644 index b0ab6ad5939..00000000000 --- a/scripts/test_static_vdis.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# -# unittest for static-vdis - -import unittest -from mock import MagicMock -import sys -import os -import subprocess -import tempfile - -# 
mock modules to avoid dependencies -sys.modules["XenAPI"] = MagicMock() -sys.modules["inventory"] = MagicMock() - -def import_from_file(module_name, file_path): - """Import a file as a module""" - if sys.version_info.major == 2: - return None - else: - from importlib import machinery, util - loader = machinery.SourceFileLoader(module_name, file_path) - spec = util.spec_from_loader(module_name, loader) - assert spec - assert spec.loader - module = util.module_from_spec(spec) - # Probably a good idea to add manually imported module stored in sys.modules - sys.modules[module_name] = module - spec.loader.exec_module(module) - return module - -def get_module(): - """Import the static-vdis script as a module for executing unit tests on functions""" - testdir = os.path.dirname(__file__) - return import_from_file("static_vdis", testdir + "/static-vdis") - -static_vdis = get_module() - -@unittest.skipIf(sys.version_info < (3, 0), reason="requires python3") -class TestReadWriteFile(unittest.TestCase): - def test_write_and_read_whole_file(self): - """Test read_whole_file and write_whole_file""" - test_file = tempfile.NamedTemporaryFile(delete=True) - filename = str(test_file.name) - content = r"""def read_whole_file(filename): - with open(filename, 'r', encoding='utf-8') as f: - return ''.join(f.readlines()).strip() - -def write_whole_file(filename, contents): - with open(filename, "w", encoding='utf-8') as f: - f.write(contents)""" - static_vdis.write_whole_file(filename, content) - expected_content = static_vdis.read_whole_file(filename) - self.assertEqual(expected_content, content) - - \ No newline at end of file From d73441d9ca89d83937eacf2084a3e850f0ef8f28 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 2 Jul 2024 12:33:42 +0000 Subject: [PATCH 152/341] CP-49916: Moved perfmon, perfmon.service and sysconfig-perfmon from scripts/ to python3/perfmon directory - Modified Makefile to include these changes. 
- Removed perfmon, perfmon.service and sysconfig-perfmon from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 7 +++++++ {scripts/plugins => python3/perfmon}/perfmon | 2 +- {scripts => python3/perfmon}/perfmon.service | 0 {scripts => python3/perfmon}/sysconfig-perfmon | 0 scripts/Makefile | 4 ---- 5 files changed, 8 insertions(+), 5 deletions(-) rename {scripts/plugins => python3/perfmon}/perfmon (100%) rename {scripts => python3/perfmon}/perfmon.service (100%) rename {scripts => python3/perfmon}/sysconfig-perfmon (100%) diff --git a/python3/Makefile b/python3/Makefile index 4d97bacc1fa..d31aa4f497d 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -11,6 +11,8 @@ install: $(IPROG) -d $(DESTDIR)$(SITE3_DIR) $(IPROG) -d $(DESTDIR)$(LIBEXECDIR) $(IPROG) -d $(DESTDIR)$(PLUGINDIR) + $(IPROG) -d $(DESTDIR)/etc/sysconfig + $(IPROG) -d $(DESTDIR)/usr/lib/systemd/system $(IDATA) packages/inventory.py $(DESTDIR)$(SITE3_DIR)/ @@ -32,6 +34,11 @@ install: $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/echo.py $(DESTDIR)$(PLUGINDIR)/echo + + $(IPROG) perfmon/perfmon $(DESTDIR)$(PLUGINDIR) + $(IDATA) perfmon/perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service + $(IPROG) perfmon/sysconfig-perfmon $(DESTDIR)/etc/sysconfig/perfmon + # templates $(IPROG) templates/debian $(DESTDIR)$(OPTDIR)/packages/post-install-scripts/debian-etch $(IPROG) templates/debug $(DESTDIR)$(OPTDIR)/packages/post-install-scripts diff --git a/scripts/plugins/perfmon b/python3/perfmon/perfmon similarity index 100% rename from scripts/plugins/perfmon rename to python3/perfmon/perfmon index e3dc2452691..c40eb659cf6 100644 --- a/scripts/plugins/perfmon +++ b/python3/perfmon/perfmon @@ -2,9 +2,9 @@ # # A plugin for requesting perfmon actions via the xe host-call-plugin mechanism -import XenAPIPlugin import os import socket +import XenAPIPlugin # TODO: put this info plus all the supported cmds in a 
shared file cmdsockname = "\0perfmon" # an af_unix socket name (the "\0" stops socket.bind() creating a fs node) diff --git a/scripts/perfmon.service b/python3/perfmon/perfmon.service similarity index 100% rename from scripts/perfmon.service rename to python3/perfmon/perfmon.service diff --git a/scripts/sysconfig-perfmon b/python3/perfmon/sysconfig-perfmon similarity index 100% rename from scripts/sysconfig-perfmon rename to python3/perfmon/sysconfig-perfmon diff --git a/scripts/Makefile b/scripts/Makefile index 0234d6ffd1a..91b232a834e 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -61,7 +61,6 @@ install: $(IDATA) cdrommon@.service $(DESTDIR)/usr/lib/systemd/system/cdrommon@.service $(IDATA) gencert.service $(DESTDIR)/usr/lib/systemd/system/gencert.service $(IDATA) xapi-domains.service $(DESTDIR)/usr/lib/systemd/system/xapi-domains.service - $(IDATA) perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service $(IDATA) generate-iscsi-iqn.service $(DESTDIR)/usr/lib/systemd/system/generate-iscsi-iqn.service $(IDATA) xapi.service $(DESTDIR)/usr/lib/systemd/system/xapi.service $(IDATA) attach-static-vdis.service $(DESTDIR)/usr/lib/systemd/system/attach-static-vdis.service @@ -115,14 +114,11 @@ install: $(IPROG) restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) - mkdir -p $(DESTDIR)/etc/sysconfig - $(IPROG) sysconfig-perfmon $(DESTDIR)/etc/sysconfig/perfmon mkdir -p $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) mkdir -p $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/perfmon $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/firewall-port $(DESTDIR)$(PLUGINDIR) From 
f68e89737d82f09e4d27ae2ecf84606c3d019069 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 2 Jul 2024 12:00:00 +0200 Subject: [PATCH 153/341] CP-49928: Add code coverage for static-vdis:attach() to fix warnings Signed-off-by: Bernhard Kaindl --- python3/tests/test_static_vdis.py | 59 ++++++++++++++++++++++++++++--- scripts/static-vdis | 9 +++-- 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/python3/tests/test_static_vdis.py b/python3/tests/test_static_vdis.py index ee424c157a1..1b7efc0bcf0 100644 --- a/python3/tests/test_static_vdis.py +++ b/python3/tests/test_static_vdis.py @@ -1,6 +1,7 @@ """python3/tests/test_static_vdis.py: Test the static-vdis script""" import os +import sys from pathlib import Path from types import ModuleType @@ -26,17 +27,66 @@ def static_vdis() -> ModuleType: # ----------------------------- Test cases ----------------------------------- -def test_whole_file(static_vdis: ModuleType): +def test_whole_file(static_vdis: ModuleType, tmp_path): """Test read_whole_file() and write_whole_file()""" with open(__file__, encoding="utf-8") as data: contents = data.read().strip() assert static_vdis.read_whole_file(__file__) == contents - assert static_vdis.write_whole_file(__file__, contents) is None - with open(__file__, encoding="utf-8") as written_data: + assert static_vdis.write_whole_file(tmp_path / "temp_file", contents) is None + with open(tmp_path / "temp_file", encoding="utf-8") as written_data: assert written_data.read().strip() == contents +def test_attach(static_vdis: ModuleType, tmpdir, mocker, capsys): + """Test five common and SMAPIv1 code paths in the attach() function""" + + # Path 1: When the VDI is not found, expect attach() to raise an exception: + static_vdis.list_vdis = lambda: [{"vdi-uuid": "existing-uuid"}] + with pytest.raises(Exception) as exc_info: + static_vdis.attach("nonexisting-uuid") + assert exc_info.value.args[0] == "Disk configuration not found" + + # Path 2: When the VDI is already attached, 
expect main():attach to return None\n: + static_vdis.list_vdis = lambda: [{"vdi-uuid": "attached", "path": "/attached"}] + sys.argv = ["static-vdis", "attach", "attached"] + static_vdis.main() + with capsys.disabled(): + assert capsys.readouterr().out == "None\n" + + # Path 3: When the VDI is not attached, attach() to return "the-id/disk": + vdis: list[dict[str, str]] = [{"vdi-uuid": "attach-uuid", "id": "the-id"}] + static_vdis.list_vdis = lambda: vdis + static_vdis.call_backend_attach = lambda driver, config: "/mock-attached-path" + static_vdis.read_whole_file = lambda path: '{"json":true}' + disk = tmpdir.mkdir(vdis[0]["id"]).join("disk") + static_vdis.main_dir = str(tmpdir) + assert static_vdis.attach("attach-uuid") == disk + assert os.readlink(disk) == "/mock-attached-path" + os.unlink(disk) + + # Path 4: Create the disk file expect it to be deleted and replaced by a symlink: + disk.write("mock-disk-contents-to-delete") + assert static_vdis.attach("attach-uuid") == disk + assert os.readlink(disk) == "/mock-attached-path" + + # Path 5: When the backend call returns None, expect attach() to raise TypeError + static_vdis.call_backend_attach = lambda driver, config: None + with pytest.raises(TypeError) as exc_info: + static_vdis.attach("attach-uuid") + + # Path 6: When the backend returns an empty str, attach() raises FileNotFoundError: + static_vdis.call_backend_attach = lambda driver, config: "" + with pytest.raises(FileNotFoundError) as exc_info: + static_vdis.attach("attach-uuid") + + # Path 7: If the smapiv3_config exists, but not the volume plugin, attach() fails: + with pytest.raises(FileNotFoundError) as exc_info: + mocker.patch("os.path.exists", return_value=True) + static_vdis.MULTIPATH_FLAG = __file__ + static_vdis.attach("attach-uuid") + + def test_fresh_name(static_vdis: ModuleType, tmp_path: Path): """Test fresh_name() and list_vdis() - all code paths""" @@ -59,7 +109,6 @@ def test_fresh_name(static_vdis: ModuleType, tmp_path: Path): assert 
static_vdis.fresh_name() == "0" - def test_sr_attach(static_vdis: ModuleType, mocker): """Test sr_attach()""" @@ -82,4 +131,4 @@ def test_sr_attach(static_vdis: ModuleType, mocker): "plugin_name", "SR.attach", ["--configuration", "key1", "value1", "--configuration", "key2", "value2"], - ) \ No newline at end of file + ) diff --git a/scripts/static-vdis b/scripts/static-vdis index ec24848e934..d94a5282ac1 100755 --- a/scripts/static-vdis +++ b/scripts/static-vdis @@ -375,8 +375,9 @@ def usage(): print(" %s attach -- attach the VDI immediately" % sys.argv[0]) print(" %s detach -- detach the VDI immediately" % sys.argv[0]) sys.exit(1) - -if __name__ == "__main__": + + +def main(): if len(sys.argv) < 2: usage() @@ -401,3 +402,7 @@ if __name__ == "__main__": detach(sys.argv[2]) else: usage() + + +if __name__ == "__main__": # pragma: no cover + main() From bff2d07b4f3b59739a14dd30bdc55ce7de414393 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 2 Jul 2024 12:00:00 +0200 Subject: [PATCH 154/341] static-vdis: pyright: Add type hints and fix errors from pyright Signed-off-by: Bernhard Kaindl --- pyproject.toml | 2 ++ scripts/static-vdis | 19 ++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 83a54c6d978..5dd6d1ee8e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,8 @@ exclude_lines = [ # Other specific lines that do not need to be covered, comment in which file: "raise NbdDeviceNotFound", # python3/libexec/usb_scan.py "params = xmlrpc.client.loads", # static-vdis + "assert.*# must not be None", # static-vdis + "except Exception:", # static-vdis ] # precision digits to use when reporting coverage (sub-percent-digits are not reported): precision = 0 diff --git a/scripts/static-vdis b/scripts/static-vdis index d94a5282ac1..ff3a01da596 100755 --- a/scripts/static-vdis +++ b/scripts/static-vdis @@ -11,7 +11,7 @@ import sys import time import urllib.parse import xmlrpc.client -from typing 
import TYPE_CHECKING +from typing import TYPE_CHECKING, cast import XenAPI @@ -166,6 +166,7 @@ def add(session, vdi_uuid, reason): sm = None all_sm = session.xenapi.SM.get_all_records() + sm_ref = "" # pragma: no cover for sm_ref in all_sm: if all_sm[sm_ref]['type'] == ty: sm = all_sm[sm_ref] @@ -183,6 +184,7 @@ def add(session, vdi_uuid, reason): if "VDI_ATTACH_OFFLINE" in sm["features"]: data["volume-plugin"] = ty data[smapiv3_config] = json.dumps(device_config) + assert device_config # must not be None sr = sr_attach(ty, device_config) location = session.xenapi.VDI.get_location(vdi) stat = call_volume_plugin(ty, "Volume.stat", [ sr, location ]) @@ -238,7 +240,7 @@ def call_backend_attach(driver, config): xml = doexec(args) if xml[0] != 0: raise Exception("SM_BACKEND_FAILURE(%d, %s, %s)" % xml) - xml_rpc = xmlrpc.client.loads(xml[1]) + xml_rpc = xmlrpc.client.loads(xml[1]) # type: Any # pragma: no cover if 'params_nbd' in xml_rpc[0][0]: # Prefer NBD if available @@ -259,8 +261,8 @@ def call_backend_detach(driver, config): raise Exception("SM_BACKEND_FAILURE(%d, %s, %s)" % xml) xml_rpc = xmlrpc.client.loads(xml[1]) try: - res = xml_rpc[0][0]['params'] - except: + res = cast(dict, xml_rpc[0][0])['params'] # pragma: no cover + except Exception: res = xml_rpc[0][0] return res @@ -301,7 +303,7 @@ def attach(vdi_uuid): os.unlink(d + "/disk") except: pass - path = None + path = None # Raise TypeError if path is not set at the end if not (os.path.exists(d + "/" + smapiv3_config)): # SMAPIv1 config = read_whole_file(d + "/config") @@ -333,10 +335,13 @@ def attach(vdi_uuid): (path, exportname) = parse_nbd_uri(uri) path = connect_nbd(path=path, exportname=exportname) + if path is None: + raise TypeError("static-vdis: attach(): path was not set") os.symlink(path, d + "/disk") return d + "/disk" if not found: raise Exception("Disk configuration not found") + return None def detach(vdi_uuid): found = False @@ -396,8 +401,8 @@ def main(): elif sys.argv[1] == "del" and 
len(sys.argv) == 3: delete(sys.argv[2]) elif sys.argv[1] == "attach" and len(sys.argv) == 3: - path = attach(sys.argv[2]) - print(path) + disk_path = attach(sys.argv[2]) + print(disk_path) elif sys.argv[1] == "detach" and len(sys.argv) == 3: detach(sys.argv[2]) else: From 849a517f3077294c2343c6503d526368bb481d60 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 3 Jul 2024 08:57:09 +0000 Subject: [PATCH 155/341] CP-49915: Moved openvswitch-config-update from scripts/plugins to python3/plugins/ - Modified Makefile to include this change - Removed openvswitch-config-update from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 1 + {scripts => python3}/plugins/openvswitch-config-update | 0 scripts/Makefile | 1 - 3 files changed, 1 insertion(+), 1 deletion(-) rename {scripts => python3}/plugins/openvswitch-config-update (100%) diff --git a/python3/Makefile b/python3/Makefile index d31aa4f497d..4e65f81ad38 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -34,6 +34,7 @@ install: $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/echo.py $(DESTDIR)$(PLUGINDIR)/echo + $(IPROG) plugins/openvswitch-config-update $(DESTDIR)$(PLUGINDIR) $(IPROG) perfmon/perfmon $(DESTDIR)$(PLUGINDIR) $(IDATA) perfmon/perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service diff --git a/scripts/plugins/openvswitch-config-update b/python3/plugins/openvswitch-config-update similarity index 100% rename from scripts/plugins/openvswitch-config-update rename to python3/plugins/openvswitch-config-update diff --git a/scripts/Makefile b/scripts/Makefile index 91b232a834e..08e7d7a517b 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -122,7 +122,6 @@ install: $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/firewall-port $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/openvswitch-config-update $(DESTDIR)$(PLUGINDIR) mkdir -p 
$(DESTDIR)$(HOOKSDIR)/host-post-declare-dead $(IPROG) 10resetvdis $(DESTDIR)$(HOOKSDIR)/host-post-declare-dead mkdir -p $(DESTDIR)/etc/bash_completion.d From 29e55748cbb96138c862bf4c5e5df141ec9c3046 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 3 Jul 2024 13:22:24 +0000 Subject: [PATCH 156/341] CP-50172: Moved Test.test from scripts/extensions to python3/extensions - Modified python3/Makefile to include Test.test - Removed Test.test from scripts/Makefile - Fixed import order Signed-off-by: Ashwinh --- python3/Makefile | 4 ++++ python3/extensions/Test.test | 22 ++++++++++++++++++++++ scripts/Makefile | 1 - scripts/extensions/Test.test | 20 -------------------- 4 files changed, 26 insertions(+), 21 deletions(-) create mode 100755 python3/extensions/Test.test delete mode 100755 scripts/extensions/Test.test diff --git a/python3/Makefile b/python3/Makefile index d31aa4f497d..fb24ea25fd2 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -14,6 +14,8 @@ install: $(IPROG) -d $(DESTDIR)/etc/sysconfig $(IPROG) -d $(DESTDIR)/usr/lib/systemd/system + $(IPROG) -d $(DESTDIR)$(EXTENSIONDIR) + $(IDATA) packages/inventory.py $(DESTDIR)$(SITE3_DIR)/ $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ @@ -31,6 +33,8 @@ install: $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin + + $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/echo.py $(DESTDIR)$(PLUGINDIR)/echo diff --git a/python3/extensions/Test.test b/python3/extensions/Test.test new file mode 100755 index 00000000000..372de668b8c --- /dev/null +++ b/python3/extensions/Test.test @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + + +import sys +import xmlrpc.client + + +def success_message(result): + rpcparams = {"Status": "Success", "Value": result} + return xmlrpc.client.dumps((rpcparams,), "", 
True) + + +def failure_message(code, params): + rpcparams = {"Status": "Failure", "ErrorDescription": [code] + params} + return xmlrpc.client.dumps((rpcparams,), "", True) + + +if __name__ == "__main__": + txt = sys.stdin.read() + req = xmlrpc.client.loads(txt) + print(failure_message("CODE", ["a", "b"])) + # print (success_message("")) diff --git a/scripts/Makefile b/scripts/Makefile index 91b232a834e..8f64e908f70 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -115,7 +115,6 @@ install: $(IPROG) backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(EXTENSIONDIR) - $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) mkdir -p $(DESTDIR)$(PLUGINDIR) diff --git a/scripts/extensions/Test.test b/scripts/extensions/Test.test deleted file mode 100755 index f49f8c22e07..00000000000 --- a/scripts/extensions/Test.test +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python3 - - -import xmlrpc.client, sys - -def success_message(result): - rpcparams = { 'Status': 'Success', 'Value': result } - return xmlrpc.client.dumps((rpcparams, ), '', True) - -def failure_message(code, params): - rpcparams = { 'Status': 'Failure', 'ErrorDescription': [ code ] + params } - return xmlrpc.client.dumps((rpcparams, ), '', True) - -if __name__ == "__main__": - txt = sys.stdin.read() - req = xmlrpc.client.loads(txt) - print (failure_message("CODE", [ "a", "b" ])) - #print (success_message("")) - - From aeae774a41e06efcefc9905f6f9d9904f90cae01 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 4 Jul 2024 13:50:45 +0000 Subject: [PATCH 157/341] CP-49926: Removed shell.py from scripts/examples/python Signed-off-by: Ashwinh --- scripts/Makefile | 1 - scripts/examples/python/shell.py | 120 ------------------------------- 2 files changed, 121 deletions(-) delete mode 100644 
scripts/examples/python/shell.py diff --git a/scripts/Makefile b/scripts/Makefile index 7d5e13ce954..6e4fe678471 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -149,7 +149,6 @@ endif $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE3_DIR)/ sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ - $(IPROG) examples/python/shell.py $(DESTDIR)$(LIBEXECDIR)/shell.py # YUM plugins $(IPROG) yum-plugins/accesstoken.py $(DESTDIR)$(YUMPLUGINDIR) $(IDATA) yum-plugins/accesstoken.conf $(DESTDIR)$(YUMPLUGINCONFDIR) diff --git a/scripts/examples/python/shell.py b/scripts/examples/python/shell.py deleted file mode 100644 index 3cfdde757db..00000000000 --- a/scripts/examples/python/shell.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2006-2008 Citrix Systems. -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -from __future__ import print_function -import atexit -import cmd -import pprint -import readline -import shlex -import string -import sys - -import XenAPI - -def logout(): - try: - session.xenapi.session.logout() - except: - pass -atexit.register(logout) - -class Shell(cmd.Cmd): - def __init__(self): - cmd.Cmd.__init__(self) - self.identchars = string.ascii_letters + string.digits + '_.' - self.prompt = "xe> " - - def preloop(self): - cmd.Cmd.preloop(self) - readline.set_completer_delims(' ') - - def default(self, line): - words = shlex.split(line) - if len(words) > 0: - res = session.xenapi_request(words[0], tuple(words[1:])) - if res is not None and res != '': - pprint.pprint(res) - return False - - def completedefault(self, text, line, begidx, endidx): - words = shlex.split(line[:begidx]) - clas, func = words[0].split('.') - if len(words) > 1 or \ - func.startswith('get_by_') or \ - func == 'get_all': - return [] - uuids = session.xenapi_request('%s.get_all' % clas, ()) - return [u + " " for u in uuids if u.startswith(text)] - - def emptyline(self): - pass - - def do_EOF(self, line): - print() - sys.exit(0) - - -def munge_types(var): - if var == "True": - return True - if var == "False": - return False - - try: - return int(var) - except: - return var - - -if __name__ == "__main__": - if len(sys.argv) < 2: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - - if sys.argv[1] != "-" and len(sys.argv) < 4: - print("Usage:") - print(sys.argv[0], " ") - sys.exit(1) - - if sys.argv[1] != "-": - url = sys.argv[1] - username = sys.argv[2] - password = sys.argv[3] - session = XenAPI.Session(url) - session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-shell.py") - cmdAt = 4 - else: - session = XenAPI.xapi_local() - session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-shell.py") - cmdAt = 2 - - # We want to support directly executing the cmd line, - # where appropriate - if len(sys.argv) > cmdAt: - command = sys.argv[cmdAt] 
- params = [munge_types(x) for x in sys.argv[(cmdAt + 1):]] - try: - print(session.xenapi_request(command, tuple(params)), file=sys.stdout) - except XenAPI.Failure as x: - print(x, file=sys.stderr) - sys.exit(2) - except Exception as e: - print(e, file=sys.stderr) - sys.exit(3) - sys.exit(0) - else: - Shell().cmdloop('Welcome to the XenServer shell. (Try "VM.get_all")') From 3b26dce21028cae3daac1792a22332bc4583693e Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 4 Jul 2024 12:00:00 +0200 Subject: [PATCH 158/341] Stop testing scripts/plugins/extauth-hook-AD.py with Python2.7 Preparations for cleaning up the checker warnings in extauth-hook-AD.py: 1. The shebang of extauth-hook-AD.py has already been changed to Python3: Thus, stop testing it with Python2.7. 2. This drops the Python2 code coverage to 28% (below 50%). We need to allow further drops in coverage: Set the limit to 0. Signed-off-by: Bernhard Kaindl --- .github/workflows/other.yml | 1 + scripts/plugins/test_extauth_hook_AD.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 959679a92b3..17f91991da5 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -55,6 +55,7 @@ jobs: --junitxml=.git/pytest${{matrix.python-version}}.xml --cov-report term-missing --cov-report xml:.git/coverage${{matrix.python-version}}.xml + --cov-fail-under 0 env: PYTHONDEVMODE: yes diff --git a/scripts/plugins/test_extauth_hook_AD.py b/scripts/plugins/test_extauth_hook_AD.py index 71b5b7c95eb..1960072f3f1 100644 --- a/scripts/plugins/test_extauth_hook_AD.py +++ b/scripts/plugins/test_extauth_hook_AD.py @@ -6,6 +6,9 @@ import os from unittest import TestCase from mock import MagicMock, patch + +import pytest + # mock modules to avoid dependencies sys.modules["XenAPIPlugin"] = MagicMock() sys.modules["XenAPI"] = MagicMock() @@ -14,6 +17,10 @@ from extauth_hook_ad import StaticSSHPam, NssConfig, SshdConfig, UsersList, GroupsList +if 
sys.version_info < (3, ): # pragma: no cover + pytest.skip(allow_module_level=True) + + def line_exists_in_config(lines, line): """ Helper function to detect whether configration match expectation From 44fdc3d67e5d407dd72ee929ee35b15c10688e61 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 4 Jul 2024 12:00:00 +0200 Subject: [PATCH 159/341] extauth-hook-AD.py: Fix `pytype` warnings, cleanup `pylint` suppressions 1. `pytype` warnings need to be fixed before it can be moved to Python3. - `pytype` reports the uses `@abc.abstractmethod` as stray, disable. - Initialise `upn`: checkers (`pytype`, `pyright`) can't see that it is already handled OK. 2. Clean-up obsolete inheriting from `object`: In Python3, all classes already always inherit from `object`. Signed-off-by: Bernhard Kaindl --- pyproject.toml | 2 -- scripts/plugins/extauth-hook-AD.py | 8 ++++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5dd6d1ee8e5..6f8e1095dc9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -263,8 +263,6 @@ expected_to_fail = [ # Need 2to3 -w and maybe a few other minor updates: "scripts/backup-sr-metadata.py", "scripts/restore-sr-metadata.py", - # Other fixes needed: - "scripts/plugins/extauth-hook-AD.py", ] # ----------------------------------------------------------------------------- diff --git a/scripts/plugins/extauth-hook-AD.py b/scripts/plugins/extauth-hook-AD.py index 98b228c04e5..0474ecacd39 100755 --- a/scripts/plugins/extauth-hook-AD.py +++ b/scripts/plugins/extauth-hook-AD.py @@ -29,7 +29,8 @@ # - /etc/pam.d/hcp_users # - /etc/ssh/ssh_config -# pylint: disable=super-with-arguments +# pylint: disable=too-few-public-methods +# pytype: disable=ignored-abstractmethod HCP_USERS = "/etc/security/hcp_ad_users.conf" @@ -81,10 +82,8 @@ class ADBackend(Enum): BD_WINBIND = 1 -# pylint: disable=useless-object-inheritance, too-few-public-methods -class ADConfig(object): +class ADConfig(): """Base class for AD 
configuration""" - #pylint: disable=too-many-arguments def __init__(self, path, session, args, ad_enabled=True, load_existing=True, file_mode=0o644): self._file_path = path @@ -257,6 +256,7 @@ def _match_subject(self, subject_rec): def _add_upn(self, subject_rec): sep = "@" + upn = "" try: upn = subject_rec["other_config"]["subject-upn"] user, domain = upn.split(sep) From 231a34e046f8c251ac5b070d199db1a68bde5ef7 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 8 Jul 2024 09:52:16 +0000 Subject: [PATCH 160/341] CP-49918: Moved pool_update.apply from scripts/extensions to python3/extensions - Modified python3/Makefile to include pool_update.apply - Removed pool_update.apply from scripts/Makefile - Used isort to sort import order and used black code formatter Signed-off-by: Ashwinh --- python3/Makefile | 2 +- .../extensions/pool_update.apply | 112 +++++++++++------- scripts/Makefile | 1 - 3 files changed, 68 insertions(+), 47 deletions(-) rename {scripts => python3}/extensions/pool_update.apply (61%) diff --git a/python3/Makefile b/python3/Makefile index ffdbe9c2481..15e0a27b57a 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -13,7 +13,6 @@ install: $(IPROG) -d $(DESTDIR)$(PLUGINDIR) $(IPROG) -d $(DESTDIR)/etc/sysconfig $(IPROG) -d $(DESTDIR)/usr/lib/systemd/system - $(IPROG) -d $(DESTDIR)$(EXTENSIONDIR) @@ -33,6 +32,7 @@ install: $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin + $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) diff --git a/scripts/extensions/pool_update.apply b/python3/extensions/pool_update.apply similarity index 61% rename from scripts/extensions/pool_update.apply rename to python3/extensions/pool_update.apply index ab8f49478dc..2bf9e0a5dcc 100644 --- a/scripts/extensions/pool_update.apply +++ 
b/python3/extensions/pool_update.apply @@ -1,83 +1,103 @@ #!/usr/bin/env python3 -import xmlrpc.client -import sys -import XenAPI +import errno import json -import traceback -import subprocess +import logging import os import re -import fasteners -import errno import shutil -import logging +import subprocess +import sys +import traceback +import xmlrpc.client + +import fasteners import xcp.logger +import XenAPI + +TMP_DIR = "/tmp/" +UPDATE_ALREADY_APPLIED = "UPDATE_ALREADY_APPLIED" +UPDATE_APPLY_FAILED = "UPDATE_APPLY_FAILED" +OTHER_OPERATION_IN_PROGRESS = "OTHER_OPERATION_IN_PROGRESS" +UPDATE_PRECHECK_FAILED_UNKNOWN_ERROR = "UPDATE_PRECHECK_FAILED_UNKNOWN_ERROR" +CANNOT_FIND_UPDATE = "CANNOT_FIND_UPDATE" +INVALID_UPDATE = "INVALID_UPDATE" +ERROR_MESSAGE_DOWNLOAD_PACKAGE = "Error downloading packages:\n" +ERROR_MESSAGE_START = "Error: " +ERROR_MESSAGE_END = "You could try " -TMP_DIR = '/tmp/' -UPDATE_ALREADY_APPLIED = 'UPDATE_ALREADY_APPLIED' -UPDATE_APPLY_FAILED = 'UPDATE_APPLY_FAILED' -OTHER_OPERATION_IN_PROGRESS = 'OTHER_OPERATION_IN_PROGRESS' -UPDATE_PRECHECK_FAILED_UNKNOWN_ERROR = 'UPDATE_PRECHECK_FAILED_UNKNOWN_ERROR' -CANNOT_FIND_UPDATE = 'CANNOT_FIND_UPDATE' -INVALID_UPDATE = 'INVALID_UPDATE' -ERROR_MESSAGE_DOWNLOAD_PACKAGE = 'Error downloading packages:\n' -ERROR_MESSAGE_START = 'Error: ' -ERROR_MESSAGE_END = 'You could try ' class EnvironmentFailure(Exception): pass + class ApplyFailure(Exception): pass + class InvalidUpdate(Exception): pass + def success_message(): - rpcparams = {'Status': 'Success', 'Value': ''} - return xmlrpc.client.dumps((rpcparams, ), '', True) + rpcparams = {"Status": "Success", "Value": ""} + return xmlrpc.client.dumps((rpcparams,), "", True) def failure_message(code, params): - rpcparams = { - 'Status': 'Failure', 'ErrorDescription': [code] + params} - return xmlrpc.client.dumps((rpcparams, ), '', True) + rpcparams = {"Status": "Failure", "ErrorDescription": [code] + params} + return xmlrpc.client.dumps((rpcparams,), "", True) 
def execute_apply(session, update_package, yum_conf_file): yum_env = os.environ.copy() - yum_env['LANG'] = 'C' - - cmd = ['yum', 'clean', 'all', '--noplugins', '-c', yum_conf_file] - p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True, env=yum_env, universal_newlines=True) + yum_env["LANG"] = "C" + + cmd = ["yum", "clean", "all", "--noplugins", "-c", yum_conf_file] + p = subprocess.Popen( + cmd, + shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + close_fds=True, + env=yum_env, + universal_newlines=True, + ) output, _ = p.communicate() - for line in output.split('\n'): + for line in output.split("\n"): xcp.logger.info(line) if p.returncode != 0: raise EnvironmentFailure("Error cleaning yum cache") - cmd = ['yum', 'upgrade', '-y', '--noplugins', '-c', yum_conf_file, update_package] - p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True, env=yum_env, universal_newlines=True) + cmd = ["yum", "upgrade", "-y", "--noplugins", "-c", yum_conf_file, update_package] + p = subprocess.Popen( + cmd, + shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + close_fds=True, + env=yum_env, + universal_newlines=True, + ) output, _ = p.communicate() - xcp.logger.info('pool_update.apply %r returncode=%r output:', cmd, p.returncode) - for line in output.split('\n'): + xcp.logger.info("pool_update.apply %r returncode=%r output:", cmd, p.returncode) + for line in output.split("\n"): xcp.logger.info(line) if p.returncode != 0: if ERROR_MESSAGE_DOWNLOAD_PACKAGE in output: - raise InvalidUpdate('Missing package(s) in the update.') + raise InvalidUpdate("Missing package(s) in the update.") - m = re.search('(?<=' + ERROR_MESSAGE_START + ').+$', output, flags=re.DOTALL) + m = re.search("(?<=" + ERROR_MESSAGE_START + ").+$", output, flags=re.DOTALL) if m: errmsg = m.group() - errmsg = re.sub(ERROR_MESSAGE_END + '.+', '', errmsg, flags=re.DOTALL) + errmsg = 
re.sub(ERROR_MESSAGE_END + ".+", "", errmsg, flags=re.DOTALL) raise ApplyFailure(errmsg) else: raise ApplyFailure(output) -if __name__ == '__main__': +if __name__ == "__main__": xcp.logger.logToSyslog(level=logging.INFO) txt = sys.stdin.read() params, method = xmlrpc.client.loads(txt) @@ -86,27 +106,29 @@ if __name__ == '__main__': lock_acquired = False try: session = XenAPI.xapi_local() - session.xenapi.login_with_password('root', '', '', 'Pool_update') + session.xenapi.login_with_password("root", "", "", "Pool_update") update = params[1] host = params[2] # Check if the update has been applied. if update in session.xenapi.host.get_updates(host): - print(failure_message( - UPDATE_ALREADY_APPLIED, [update])) + print(failure_message(UPDATE_ALREADY_APPLIED, [update])) sys.exit(0) update_uuid = session.xenapi.pool_update.get_uuid(update) - yum_conf_file = os.path.join(TMP_DIR, update_uuid, 'yum.conf') + yum_conf_file = os.path.join(TMP_DIR, update_uuid, "yum.conf") # To prevent the race condition of invoking apply, set a lock. 
- lock_file = os.path.join(TMP_DIR, update_uuid + '.lck') + lock_file = os.path.join(TMP_DIR, update_uuid + ".lck") lock = fasteners.InterProcessLock(lock_file) lock_acquired = lock.acquire(blocking=False) if not lock_acquired: - print(failure_message( - OTHER_OPERATION_IN_PROGRESS, ['Applying the update', update])) + print( + failure_message( + OTHER_OPERATION_IN_PROGRESS, ["Applying the update", update] + ) + ) sys.exit(0) # Run precheck @@ -136,10 +158,10 @@ if __name__ == '__main__': pass else: raise - with open (yum_conf_file, "w+") as file: + with open(yum_conf_file, "w+") as file: file.write("{0}".format(yum_conf)) - execute_apply(session, '@update', yum_conf_file) + execute_apply(session, "@update", yum_conf_file) session.xenapi.pool_update.resync_host(host) print(success_message()) diff --git a/scripts/Makefile b/scripts/Makefile index 6e4fe678471..8d3196ceece 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -116,7 +116,6 @@ install: $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) - $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) mkdir -p $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) From 2a2d233254f24235cd7b38e454f973e2870ca0c5 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 8 Jul 2024 12:00:00 +0200 Subject: [PATCH 161/341] CP-50100: backup-sr-metadata.py: apply 2to3 and change shebang to python3 Signed-off-by: Bernhard Kaindl --- pyproject.toml | 1 - scripts/backup-sr-metadata.py | 22 +++++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6f8e1095dc9..07515dc95a8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -261,7 +261,6 @@ discard_messages_matching = [ ] expected_to_fail = [ # Need 2to3 -w and maybe a few other minor updates: - 
"scripts/backup-sr-metadata.py", "scripts/restore-sr-metadata.py", ] diff --git a/scripts/backup-sr-metadata.py b/scripts/backup-sr-metadata.py index 2464d5c8761..346c636b8f0 100644 --- a/scripts/backup-sr-metadata.py +++ b/scripts/backup-sr-metadata.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 # Back up the SR metadata and VDI list into an XML file # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 @@ -7,7 +7,7 @@ import sys import getopt import codecs -from xml.dom.minidom import Document +from xml.dom.minidom import Document # pytype: disable=pyi-error def logout(): try: @@ -17,11 +17,11 @@ def logout(): atexit.register(logout) def usage(): - print >> sys.stderr, "%s [-f ]" % sys.argv[0] + print("%s [-f ]" % sys.argv[0], file=sys.stderr) sys.exit(1) def set_if_exists(xml, record, key): - if record.has_key(key): + if key in record: xml.setAttribute(key, record[key]) else: xml.setAttribute(key, "") @@ -32,8 +32,8 @@ def main(argv): try: opts, args = getopt.getopt(argv, "hf:", []) - except getopt.GetoptError, err: - print str(err) + except getopt.GetoptError as err: + print(str(err)) usage() outfile = None @@ -60,18 +60,18 @@ def main(argv): set_if_exists(srxml, srrec, 'uuid') set_if_exists(srxml, srrec, 'name_label') set_if_exists(srxml, srrec, 'name_description') - + for vdiref in srrec['VDIs']: - try: + try: vdirec = session.xenapi.VDI.get_record(vdiref) vdixml = doc.createElement("vdi") set_if_exists(vdixml, vdirec, 'uuid') set_if_exists(vdixml, vdirec, 'name_label') set_if_exists(vdixml, vdirec, 'name_description') srxml.appendChild(vdixml) - except: - print >> sys.stderr, "Failed to get VDI record for: %s" % vdiref - + except Exception: + print("Failed to get VDI record for: %s" % vdiref, file=sys.stderr) + metaxml.appendChild(srxml) doc.writexml(f, encoding="utf-8") From 8353dae1bdc8c1647ceb7ca482f8afceed04b983 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 8 Jul 2024 12:00:00 +0200 Subject: [PATCH 162/341] CP-50100: 
backup-sr-metadata.py: Fix pyright and pylint to prepare move Signed-off-by: Bernhard Kaindl --- python3/stubs/XenAPI.pyi | 3 +++ scripts/backup-sr-metadata.py | 38 ++++++++++++++++++----------------- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/python3/stubs/XenAPI.pyi b/python3/stubs/XenAPI.pyi index 4590e614814..bde962b0556 100644 --- a/python3/stubs/XenAPI.pyi +++ b/python3/stubs/XenAPI.pyi @@ -42,9 +42,12 @@ class _Dispatcher: """Authenticate the session with the XenAPI server.""" def logout(self) -> None: """End the session with the XenAPI server.""" + + # Dynamic attributes that type checkers like pytype and pyright cannot check: session: Incomplete secret: Incomplete SR: Incomplete + VDI: Incomplete PBD: Incomplete pool: Incomplete VM: Incomplete diff --git a/scripts/backup-sr-metadata.py b/scripts/backup-sr-metadata.py index 346c636b8f0..8f83a9b06cb 100644 --- a/scripts/backup-sr-metadata.py +++ b/scripts/backup-sr-metadata.py @@ -3,52 +3,56 @@ # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 import atexit -import XenAPI -import sys -import getopt import codecs +import contextlib +import getopt +import sys from xml.dom.minidom import Document # pytype: disable=pyi-error -def logout(): - try: - session.xenapi.session.logout() - except: - pass -atexit.register(logout) +import XenAPI + def usage(): print("%s [-f ]" % sys.argv[0], file=sys.stderr) - sys.exit(1) + def set_if_exists(xml, record, key): if key in record: xml.setAttribute(key, record[key]) else: xml.setAttribute(key, "") - + def main(argv): session = XenAPI.xapi_local() + + def logout(): + with contextlib.suppress(Exception): + session.xenapi.session.logout() + + atexit.register(logout) + session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-backup-sr-metadata") try: - opts, args = getopt.getopt(argv, "hf:", []) + opts, _ = getopt.getopt(argv, "hf:", []) except getopt.GetoptError as err: - print(str(err)) + print(err) usage() + sys.exit(1) outfile = None for o,a in 
opts: if o == "-f": outfile = a - if outfile == None: + if outfile is None: usage() + sys.exit(1) f = codecs.open(outfile, 'w', encoding="utf-8") srs = session.xenapi.SR.get_all_records() - vdis = session.xenapi.SR.get_all_records() - + doc = Document() metaxml = doc.createElement("meta") @@ -80,5 +84,3 @@ def main(argv): if __name__ == "__main__": main(sys.argv[1:]) - - From ef27a66102f10ca83afee82866cd72c349c16708 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 8 Jul 2024 12:00:00 +0200 Subject: [PATCH 163/341] Improve spelling and fix typos in Python3 scripts Signed-off-by: Bernhard Kaindl --- python3/bin/perfmon | 2 +- python3/libexec/nbd_client_manager.py | 2 +- python3/packages/observer.py | 8 ++++---- python3/perfmon/perfmon | 2 +- scripts/examples/python/XenAPIPlugin.py | 2 +- scripts/plugins/extauth-hook-AD.py | 8 ++++---- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/python3/bin/perfmon b/python3/bin/perfmon index e5c6741b2d3..58be93284d7 100644 --- a/python3/bin/perfmon +++ b/python3/bin/perfmon @@ -27,7 +27,7 @@ # based on the "start" CGI param. It will return the highest level of granularity # available for the period requested. # -# The "cf" CGI param specfies the row. (All rows are returned if it's missing.) +# The "cf" CGI param specifies the row. If it is not set, all rows are returned. 
# pylint: disable=too-many-lines, missing-class-docstring # pytype: disable=attribute-error diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index 0f77e69b12e..d0655df9756 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -96,7 +96,7 @@ def _call(cmd_args, error=True): if error and proc.returncode != 0: LOGGER.error( - "%s exitted with code %d: %s", " ".join(cmd_args), proc.returncode, stderr + "%s exited with code %d: %s", " ".join(cmd_args), proc.returncode, stderr ) raise subprocess.CalledProcessError( diff --git a/python3/packages/observer.py b/python3/packages/observer.py index 4b3451dbec3..1651eb5b4d8 100644 --- a/python3/packages/observer.py +++ b/python3/packages/observer.py @@ -33,13 +33,13 @@ from logging.handlers import SysLogHandler from typing import List, Sequence -# The opentelemetry library may generate exceptions we aren't expecting, this code +# The OpenTelemetry library may generate exceptions we aren't expecting: This code # must not fail or it will cause the pass-through script to fail when at worst # this script should be a noop. As such, we sometimes need to catch broad exceptions: # pylint: disable=broad-exception-caught, too-many-locals, too-many-statements # wrapt.decorator adds the extra parameters so we shouldn't provide them: # pylint: disable=no-value-for-parameter -# We only want to import opentelemetry libraries if instrumentation is enabled +# We only want to import OpenTelemetry libraries when instrumentation is enabled # pylint: disable=import-outside-toplevel DEBUG_ENABLED = os.getenv("XAPI_TEST") @@ -103,7 +103,7 @@ def _init_tracing(configs: List[str], config_dir: str): If configs is empty, return the noop span and patch_module functions. 
If configs are passed: - - Import the opentelemetry packages + - Import the OpenTelemetry packages - Read the configuration file - Create a tracer - Trace the script @@ -372,7 +372,7 @@ def _patch_module(module_name): # are not overridden and will be the defined no-op functions. span, patch_module = _init_tracing(observer_configs, observer_config_dir) - # If tracing is now operational, explicity set "OTEL_SDK_DISABLED" to "false". + # If tracing is now operational, explicitly set "OTEL_SDK_DISABLED" to "false". # In our case, different from the standard, we want the tracing disabled by # default, so if the env variable is not set the noop implementation is used. os.environ["OTEL_SDK_DISABLED"] = "false" diff --git a/python3/perfmon/perfmon b/python3/perfmon/perfmon index c40eb659cf6..9f26f998fd4 100644 --- a/python3/perfmon/perfmon +++ b/python3/perfmon/perfmon @@ -6,7 +6,7 @@ import os import socket import XenAPIPlugin -# TODO: put this info plus all the supported cmds in a shared file +# TODO: Document this information and all supported commands cmdsockname = "\0perfmon" # an af_unix socket name (the "\0" stops socket.bind() creating a fs node) cmdmaxlen = 256 diff --git a/scripts/examples/python/XenAPIPlugin.py b/scripts/examples/python/XenAPIPlugin.py index 87d8c23c12b..1d657f065d1 100644 --- a/scripts/examples/python/XenAPIPlugin.py +++ b/scripts/examples/python/XenAPIPlugin.py @@ -16,7 +16,7 @@ import xmlrpc.client as xmlrpclib class Failure(Exception): - """Provide compatibilty with plugins written against XenServer 5.5 API""" + """Provide compatibility with plugins written against the XenServer 5.5 API""" def __init__(self, code, params): Exception.__init__(self) diff --git a/scripts/plugins/extauth-hook-AD.py b/scripts/plugins/extauth-hook-AD.py index 0474ecacd39..38fb0b67329 100755 --- a/scripts/plugins/extauth-hook-AD.py +++ b/scripts/plugins/extauth-hook-AD.py @@ -277,7 +277,7 @@ def _add_subject(self, subject_rec): logger.debug("Permit user %s, Current 
sid is %s", formatted_name, sid) self._lines.append(formatted_name) - # If ssh key is permittd in authorized_keys, + # If the ssh key is permitted in the authorized_keys file, # The original name is compared, add UPN and original name if self._backend == ADBackend.BD_PBIS and name != formatted_name: self._lines.append(name) @@ -311,7 +311,7 @@ def _add_subject(self, subject_rec): class KeyValueConfig(ADConfig): """ - Only support configure files with key value in each line, seperated by sep + Only support configure files with key value in each line, separated by sep Otherwise, it will be just copied and un-configurable If multiple lines with the same key exists, only the first line will be configured """ @@ -475,7 +475,7 @@ def after_extauth_enable(session, args): def after_xapi_initialize(session, args): - """Callback afer xapi initialize""" + """Callback after xapi initialization""" return refresh_all_configurations(session, args, "after_xapi_initialize") @@ -485,7 +485,7 @@ def after_subject_add(session, args): def after_subject_remove(session, args): - """Callbackk after remove subject""" + """Callback after remove subject""" return refresh_dynamic_pam(session, args, "after_subject_remove") From f588d6a84de553516a448d323476716aeee1a4c5 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 8 Jul 2024 12:00:00 +0200 Subject: [PATCH 164/341] Python3: Cleanup unused imports Signed-off-by: Bernhard Kaindl --- python3/extensions/pool_update.apply | 2 -- scripts/plugins/extauth-hook-AD.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/python3/extensions/pool_update.apply b/python3/extensions/pool_update.apply index 2bf9e0a5dcc..092da42a90b 100644 --- a/python3/extensions/pool_update.apply +++ b/python3/extensions/pool_update.apply @@ -2,14 +2,12 @@ import errno -import json import logging import os import re import shutil import subprocess import sys -import traceback import xmlrpc.client import fasteners diff --git a/scripts/plugins/extauth-hook-AD.py 
b/scripts/plugins/extauth-hook-AD.py index 0474ecacd39..fc359f060c7 100755 --- a/scripts/plugins/extauth-hook-AD.py +++ b/scripts/plugins/extauth-hook-AD.py @@ -10,7 +10,6 @@ # Alternatively, the extauth-hook module can be called, which will # dispatch to the correct extauth-hook-.py module automatically. import abc -import sys import subprocess import os import shutil @@ -20,7 +19,6 @@ from collections import OrderedDict from enum import Enum import XenAPIPlugin -import XenAPI # this plugin manage following configuration files for external auth From 16e37e606bc64692d622e8e3cfe2c49201b06e4a Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 8 Jul 2024 12:00:00 +0200 Subject: [PATCH 165/341] scripts/restore-sr-metadata.py: Apply 2to3, fix pytype(prepare moving it) Signed-off-by: Bernhard Kaindl --- pyproject.toml | 2 -- scripts/restore-sr-metadata.py | 46 ++++++++++++++++++---------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 07515dc95a8..2749d69956f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -260,8 +260,6 @@ discard_messages_matching = [ "No Node.TEXT_NODE in module xml.dom.minidom, referenced from 'xml.dom.expatbuilder'" ] expected_to_fail = [ - # Need 2to3 -w and maybe a few other minor updates: - "scripts/restore-sr-metadata.py", ] # ----------------------------------------------------------------------------- diff --git a/scripts/restore-sr-metadata.py b/scripts/restore-sr-metadata.py index 105591a15c5..21214fef3c5 100644 --- a/scripts/restore-sr-metadata.py +++ b/scripts/restore-sr-metadata.py @@ -3,34 +3,36 @@ # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 import atexit +import contextlib import XenAPI import os, sys, time import getopt -from xml.dom.minidom import parse +from xml.dom.minidom import parse # pytype: disable=pyi-error import codecs sys.stdout = codecs.getwriter("utf-8")(sys.stdout) sys.stderr = codecs.getwriter("utf-8")(sys.stderr) -def logout(): - try: - 
session.xenapi.session.logout() - except: - pass -atexit.register(logout) def usage(): - print >> sys.stderr, "%s -f -u " % sys.argv[0] + print("%s -f -u " % sys.argv[0], file=sys.stderr) sys.exit(1) def main(argv): session = XenAPI.xapi_local() + + def logout(): + with contextlib.suppress(Exception): + session.xenapi.session.logout() + + atexit.register(logout) + session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-restore-sr-metadata") try: opts, args = getopt.getopt(argv, "hf:u:", []) - except getopt.GetoptError, err: - print str(err) + except getopt.GetoptError as err: + print(str(err)) usage() infile = None @@ -47,11 +49,11 @@ def main(argv): try: doc = parse(infile) except: - print >> sys.stderr, "Error parsing %s" % infile + print("Error parsing %s" % infile, file=sys.stderr) sys.exit(1) if doc.documentElement.tagName != "meta": - print >> sys.stderr, "Unexpected root element while parsing %s" % infile + print("Unexpected root element while parsing %s" % infile, file=sys.stderr) sys.exit(1) for srxml in doc.documentElement.childNodes: @@ -60,19 +62,19 @@ def main(argv): name_label = srxml.getAttribute("name_label") name_descr = srxml.getAttribute("name_description") except: - print >> sys.stderr, "Error parsing SR tag" + print("Error parsing SR tag", file=sys.stderr) continue # only set attributes on the selected SR passed in on cmd line if sruuid is None or sruuid == "all" or sruuid == uuid: try: srref = session.xenapi.SR.get_by_uuid(uuid) - print "Setting SR (%s):" % uuid + print("Setting SR (%s):" % uuid) session.xenapi.SR.set_name_label(srref, name_label) - print " Name: %s " % name_label + print(" Name: %s " % name_label) session.xenapi.SR.set_name_description(srref, name_descr) - print " Description: %s" % name_descr + print(" Description: %s" % name_descr) except: - print >> sys.stderr, "Error setting SR data for: %s (%s)" % (uuid, name_label) + print("Error setting SR data for: %s (%s)" % (uuid, name_label), file=sys.stderr) sys.exit(1) 
# go through all the SR VDIs and set the name_label and description for vdixml in srxml.childNodes: @@ -81,17 +83,17 @@ def main(argv): vdi_label = vdixml.getAttribute("name_label") vdi_descr = vdixml.getAttribute("name_description") except: - print >> sys.stderr, "Error parsing VDI tag" + print("Error parsing VDI tag", file=sys.stderr) continue try: vdiref = session.xenapi.VDI.get_by_uuid(vdi_uuid) - print "Setting VDI (%s):" % vdi_uuid + print("Setting VDI (%s):" % vdi_uuid) session.xenapi.VDI.set_name_label(vdiref, vdi_label) - print " Name: %s" % vdi_label + print(" Name: %s" % vdi_label) session.xenapi.VDI.set_name_description(vdiref, vdi_descr) - print " Description: %s" % vdi_descr + print(" Description: %s" % vdi_descr) except: - print >> sys.stderr, "Error setting VDI data for: %s (%s)" % (vdi_uuid, name_label) + print("Error setting VDI data for: %s (%s)" % (vdi_uuid, name_label), file=sys.stderr) continue if __name__ == "__main__": From 81fecb602f65a71bc0c4fff1432eae76f7deb407 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 9 Jul 2024 12:00:00 +0200 Subject: [PATCH 166/341] scripts/xe-reset-networking: Convert tabs to spaces and fix whitespace Signed-off-by: Bernhard Kaindl --- scripts/xe-reset-networking | 473 ++++++++++++++++++------------------ 1 file changed, 236 insertions(+), 237 deletions(-) diff --git a/scripts/xe-reset-networking b/scripts/xe-reset-networking index 38f676a5aaa..f26ce9fd7d4 100755 --- a/scripts/xe-reset-networking +++ b/scripts/xe-reset-networking @@ -27,147 +27,147 @@ management_conf = '/etc/firstboot.d/data/management.conf' network_reset = '/tmp/network-reset' def read_dict_file(fname): - f = open(fname, 'r') - d = {} - for l in f.readlines(): - kv = l.split('=') - d[kv[0].strip()] = kv[1].strip().strip("'") - return d + f = open(fname, 'r') + d = {} + for l in f.readlines(): + kv = l.split('=') + d[kv[0].strip()] = kv[1].strip().strip("'") + return d def read_inventory(): - return read_dict_file(inventory_file) + 
return read_dict_file(inventory_file) def read_management_conf(): - return read_dict_file(management_conf) + return read_dict_file(management_conf) def write_inventory(inventory): - f = open(inventory_file, 'w') - for k in inventory: - f.write(k + "='" + inventory[k] + "'\n") - f.flush() - os.fsync(f.fileno()) - f.close() + f = open(inventory_file, 'w') + for k in inventory: + f.write(k + "='" + inventory[k] + "'\n") + f.flush() + os.fsync(f.fileno()) + f.close() def valid_vlan(vlan): - if not re.match('^\d+$', vlan): - return False - if int(vlan)<0 or int(vlan)>4094: - return False - return True + if not re.match('^\d+$', vlan): + return False + if int(vlan)<0 or int(vlan)>4094: + return False + return True if __name__ == "__main__": - parser = OptionParser() - parser.add_option("-m", "--master", help="Master's address", dest="address", default=None) - parser.add_option("--device", help="Device name of new management interface", dest="device", default=None) - parser.add_option("--mode", help='IP configuration mode for new management interface: "none", "dhcp" or "static" (default is dhcp)', dest="mode", default="dhcp") - parser.add_option("--mode-v6", help='IPv6 configuration mode for new management interface: "none", "dhcp", "autoconf" or "static" (default is none)', dest="mode_v6", default="none") - parser.add_option("--novlan", help="no vlan is used for new management interface", dest="novlan", action="store_const", const=True, default=False) - parser.add_option("--vlan", help="vlanID for new management interface to be on vlan network", dest="vlan", default=None) - parser.add_option("--ip", help="IP address for new management interface", dest="ip", default='') - parser.add_option("--ipv6", help="IPv6 address (CIDR format) for new management interface", dest="ipv6", default='') - parser.add_option("--netmask", help="Netmask for new management interface", dest="netmask", default='') - parser.add_option("--gateway", help="Gateway for new management interface", 
dest="gateway", default='') - parser.add_option("--gateway-v6", help="IPv6 Gateway for new management interface", dest="gateway_v6", default='') - parser.add_option("--dns", help="DNS server for new management interface", dest="dns", default='') - (options, args) = parser.parse_args() - - # Determine pool role - try: - f = open(pool_conf, 'r') - try: - l = f.readline() - ls = l.split(':') - if ls[0].strip() == 'master': - master = True - address = 'localhost' - else: - master = False - if options.address == None: - address = ls[1].strip() - else: - address = options.address - finally: - f.close() - except: - pass - - # Get the management device from the firstboot data if not specified by the user - if options.device == None: - try: - conf = read_management_conf() - device = conf['LABEL'] - except: - print("Could not figure out which interface should become the management interface. \ - Please specify one using the --device option.") - sys.exit(1) - else: - device = options.device - - # Get the VLAN if provided in the firstboot data and not specified by the user - vlan = None - if options.vlan: - if options.novlan: - parser.error('"--vlan " and "--novlan" should not be used together') - sys.exit(1) - if not valid_vlan(options.vlan): - print("VLAN tag you gave was invalid, It must be between 0 and 4094") - sys.exit(1) - vlan = options.vlan - elif not options.novlan: - try: - conf = read_management_conf() - vlan = conf['VLAN'] - except KeyError: - pass - - # Determine IP configuration for management interface - options.mode = options.mode.lower() - if options.mode not in ["none", "dhcp", "static"]: - parser.error('mode should be either "none", "dhcp" or "static"') - sys.exit(1) - - options.mode_v6 = options.mode_v6.lower() - if options.mode not in ["none", "autoconf", "dhcp", "static"]: - parser.error('mode-v6 should be either "none", "autoconf", "dhcp" or "static"') - sys.exit(1) - - if options.mode == "none" and options.mode_v6 == "none": - parser.error("Either mode 
or mode-v6 must be not 'none'") - sys.exit(1) - - if options.mode == 'static' and (options.ip == '' or options.netmask == ''): - parser.error("if static IP mode is selected, an IP address and netmask need to be specified") - sys.exit(1) - - if options.mode_v6 == 'static': - if options.ipv6 == '': - parser.error("if static IPv6 mode is selected, an IPv6 address needs to be specified") - elif options.ipv6.find('/') == -1: - parser.error("Invalid format: IPv6 must be specified with CIDR format: /") - sys.exit(1) - - # Warn user - if not os.access('/tmp/fist_network_reset_no_warning', os.F_OK): - configuration = [] - configuration.append("Management interface: " + device) - configuration.append("IP configuration mode: " + options.mode) - configuration.append("IPv6 configuration mode:" + options.mode_v6) - if vlan != None: - configuration.append("Vlan: " + vlan) - if options.mode == "static": - configuration.append("IP address: " + options.ip) - configuration.append("Netmask: " + options.netmask) - if options.mode_v6 == "static": - configuration.append("IPv6/CIDR: " + options.ipv6) - if options.gateway != '': - configuration.append("Gateway: " + options.gateway) - if options.gateway_v6 != '': - configuration.append("IPv6 gateway: " + options.gateway_v6) - if options.dns != '': - configuration.append("DNS server(s): " + options.dns) - if master == False: - configuration.append("Pool master's address: " + address) - warning = """---------------------------------------------------------------------- + parser = OptionParser() + parser.add_option("-m", "--master", help="Master's address", dest="address", default=None) + parser.add_option("--device", help="Device name of new management interface", dest="device", default=None) + parser.add_option("--mode", help='IP configuration mode for new management interface: "none", "dhcp" or "static" (default is dhcp)', dest="mode", default="dhcp") + parser.add_option("--mode-v6", help='IPv6 configuration mode for new management 
interface: "none", "dhcp", "autoconf" or "static" (default is none)', dest="mode_v6", default="none") + parser.add_option("--novlan", help="no vlan is used for new management interface", dest="novlan", action="store_const", const=True, default=False) + parser.add_option("--vlan", help="vlanID for new management interface to be on vlan network", dest="vlan", default=None) + parser.add_option("--ip", help="IP address for new management interface", dest="ip", default='') + parser.add_option("--ipv6", help="IPv6 address (CIDR format) for new management interface", dest="ipv6", default='') + parser.add_option("--netmask", help="Netmask for new management interface", dest="netmask", default='') + parser.add_option("--gateway", help="Gateway for new management interface", dest="gateway", default='') + parser.add_option("--gateway-v6", help="IPv6 Gateway for new management interface", dest="gateway_v6", default='') + parser.add_option("--dns", help="DNS server for new management interface", dest="dns", default='') + (options, args) = parser.parse_args() + + # Determine pool role + try: + f = open(pool_conf, 'r') + try: + l = f.readline() + ls = l.split(':') + if ls[0].strip() == 'master': + master = True + address = 'localhost' + else: + master = False + if options.address == None: + address = ls[1].strip() + else: + address = options.address + finally: + f.close() + except: + pass + + # Get the management device from the firstboot data if not specified by the user + if options.device == None: + try: + conf = read_management_conf() + device = conf['LABEL'] + except: + print("Could not figure out which interface should become the management interface. 
\ + Please specify one using the --device option.") + sys.exit(1) + else: + device = options.device + + # Get the VLAN if provided in the firstboot data and not specified by the user + vlan = None + if options.vlan: + if options.novlan: + parser.error('"--vlan " and "--novlan" should not be used together') + sys.exit(1) + if not valid_vlan(options.vlan): + print("VLAN tag you gave was invalid, It must be between 0 and 4094") + sys.exit(1) + vlan = options.vlan + elif not options.novlan: + try: + conf = read_management_conf() + vlan = conf['VLAN'] + except KeyError: + pass + + # Determine IP configuration for management interface + options.mode = options.mode.lower() + if options.mode not in ["none", "dhcp", "static"]: + parser.error('mode should be either "none", "dhcp" or "static"') + sys.exit(1) + + options.mode_v6 = options.mode_v6.lower() + if options.mode not in ["none", "autoconf", "dhcp", "static"]: + parser.error('mode-v6 should be either "none", "autoconf", "dhcp" or "static"') + sys.exit(1) + + if options.mode == "none" and options.mode_v6 == "none": + parser.error("Either mode or mode-v6 must be not 'none'") + sys.exit(1) + + if options.mode == 'static' and (options.ip == '' or options.netmask == ''): + parser.error("if static IP mode is selected, an IP address and netmask need to be specified") + sys.exit(1) + + if options.mode_v6 == 'static': + if options.ipv6 == '': + parser.error("if static IPv6 mode is selected, an IPv6 address needs to be specified") + elif options.ipv6.find('/') == -1: + parser.error("Invalid format: IPv6 must be specified with CIDR format: /") + sys.exit(1) + + # Warn user + if not os.access('/tmp/fist_network_reset_no_warning', os.F_OK): + configuration = [] + configuration.append("Management interface: " + device) + configuration.append("IP configuration mode: " + options.mode) + configuration.append("IPv6 configuration mode:" + options.mode_v6) + if vlan != None: + configuration.append("Vlan: " + vlan) + if options.mode == 
"static": + configuration.append("IP address: " + options.ip) + configuration.append("Netmask: " + options.netmask) + if options.mode_v6 == "static": + configuration.append("IPv6/CIDR: " + options.ipv6) + if options.gateway != '': + configuration.append("Gateway: " + options.gateway) + if options.gateway_v6 != '': + configuration.append("IPv6 gateway: " + options.gateway_v6) + if options.dns != '': + configuration.append("DNS server(s): " + options.dns) + if master == False: + configuration.append("Pool master's address: " + address) + warning = """---------------------------------------------------------------------- !! WARNING !! This command will reboot the host and reset its network configuration. @@ -179,115 +179,114 @@ Before completing this command: ---------------------------------------------------------------------- Your network will be re-configured as follows:\n\n""" - confirmation = """\n\nIf you want to change any of the above settings, type 'no' and re-run + confirmation = """\n\nIf you want to change any of the above settings, type 'no' and re-run the command with appropriate arguments (use --help for a list of options). Type 'yes' to continue. Type 'no' to cancel. 
""" - res = input(warning + '\n'.join(configuration) + confirmation) - if res != 'yes': - sys.exit(1) - - # Update master's IP, if needed and given - if master == False and options.address != None: - print("Setting master's ip (" + address + ")...") - try: - f = open(pool_conf, 'w') - f.write('slave:' + address) - finally: - f.flush() - os.fsync(f.fileno()) - f.close() - - # Construct bridge name for management interface based on convention - if device[:3] == 'eth': - bridge = 'xenbr' + device[3:] - else: - bridge = 'br' + device - - # Ensure xapi is not running - print("Stopping xapi...") - os.system('service xapi stop >/dev/null 2>/dev/null') - - # Reconfigure new management interface - print("Reconfiguring " + device + "...") - os.system('systemctl stop xcp-networkd >/dev/null 2>/dev/null') - try: - os.remove('/var/lib/xcp/networkd.db') - except Exception as e: - print('Warning: Failed to delete networkd.db.\n%s' % e) - - # Update interfaces in inventory file - print('Updating inventory file...') - inventory = read_inventory() - if vlan != None: - inventory['MANAGEMENT_INTERFACE'] = 'xentemp' - else: - inventory['MANAGEMENT_INTERFACE'] = bridge - inventory['CURRENT_INTERFACES'] = '' - write_inventory(inventory) - - # Rewrite firstboot management.conf file, which will be picked it by xcp-networkd on restart (if used) - is_static = False - try: - f = open(management_conf, 'w') - f.write("LABEL='" + device + "'\n") - if options.mode != "none": - f.write("MODE='" + options.mode + "'\n") - if options.mode_v6 != "none": - f.write("MODEV6='" + options.mode_v6 + "'\n") - if vlan != None: - f.write("VLAN='" + vlan + "'\n") - if options.mode == 'static': - is_static = True - f.write("IP='" + options.ip + "'\n") - f.write("NETMASK='" + options.netmask + "'\n") - if options.gateway != '': - f.write("GATEWAY='" + options.gateway + "'\n") - if options.mode_v6 == "static": - is_static = True - f.write("IPv6='" + options.ipv6 + "'\n") - if options.gateway_v6 != '': - 
f.write("IPv6_GATEWAY='" + options.gateway_v6 + "'\n") - if is_static and options.dns != '': - f.write("DNS='" + options.dns + "'\n") - finally: - f.flush() - os.fsync(f.fileno()) - f.close() - - # Write trigger file for XAPI to continue the network reset on startup - try: - f = open(network_reset, 'w') - f.write('DEVICE=' + device + '\n') - if options.mode != "none": - f.write('MODE=' + options.mode + '\n') - if options.mode_v6 != "none": - f.write('MODE_V6=' + options.mode_v6 + '\n') - if vlan != None: - f.write('VLAN=' + vlan + '\n') - if options.mode == 'static': - f.write('IP=' + options.ip + '\n') - f.write('NETMASK=' + options.netmask + '\n') - if options.gateway != '': - f.write('GATEWAY=' + options.gateway + '\n') - if options.mode_v6 == "static": - f.write('IPV6=' + options.ipv6 + '\n') - if options.gateway_v6 != '': - f.write('GATEWAY_V6=' + options.gateway_v6 + '\n') - if is_static and options.dns != '': - f.write('DNS=' + options.dns + '\n') - finally: - f.flush() - os.fsync(f.fileno()) - f.close() - - # Reset the domain 0 network interface naming configuration - # back to a fresh-install state for the currently-installed - # hardware. 
- os.system("/etc/sysconfig/network-scripts/interface-rename.py --reset-to-install") - - # Reboot - os.system("mount -o remount,rw / && reboot -f") + res = input(warning + '\n'.join(configuration) + confirmation) + if res != 'yes': + sys.exit(1) + + # Update master's IP, if needed and given + if master == False and options.address != None: + print("Setting master's ip (" + address + ")...") + try: + f = open(pool_conf, 'w') + f.write('slave:' + address) + finally: + f.flush() + os.fsync(f.fileno()) + f.close() + + # Construct bridge name for management interface based on convention + if device[:3] == 'eth': + bridge = 'xenbr' + device[3:] + else: + bridge = 'br' + device + + # Ensure xapi is not running + print("Stopping xapi...") + os.system('service xapi stop >/dev/null 2>/dev/null') + + # Reconfigure new management interface + print("Reconfiguring " + device + "...") + os.system('systemctl stop xcp-networkd >/dev/null 2>/dev/null') + try: + os.remove('/var/lib/xcp/networkd.db') + except Exception as e: + print('Warning: Failed to delete networkd.db.\n%s' % e) + + # Update interfaces in inventory file + print('Updating inventory file...') + inventory = read_inventory() + if vlan != None: + inventory['MANAGEMENT_INTERFACE'] = 'xentemp' + else: + inventory['MANAGEMENT_INTERFACE'] = bridge + inventory['CURRENT_INTERFACES'] = '' + write_inventory(inventory) + + # Rewrite firstboot management.conf file, which will be picked it by xcp-networkd on restart (if used) + is_static = False + try: + f = open(management_conf, 'w') + f.write("LABEL='" + device + "'\n") + if options.mode != "none": + f.write("MODE='" + options.mode + "'\n") + if options.mode_v6 != "none": + f.write("MODEV6='" + options.mode_v6 + "'\n") + if vlan != None: + f.write("VLAN='" + vlan + "'\n") + if options.mode == 'static': + is_static = True + f.write("IP='" + options.ip + "'\n") + f.write("NETMASK='" + options.netmask + "'\n") + if options.gateway != '': + f.write("GATEWAY='" + options.gateway + 
"'\n") + if options.mode_v6 == "static": + is_static = True + f.write("IPv6='" + options.ipv6 + "'\n") + if options.gateway_v6 != '': + f.write("IPv6_GATEWAY='" + options.gateway_v6 + "'\n") + if is_static and options.dns != '': + f.write("DNS='" + options.dns + "'\n") + finally: + f.flush() + os.fsync(f.fileno()) + f.close() + + # Write trigger file for XAPI to continue the network reset on startup + try: + f = open(network_reset, 'w') + f.write('DEVICE=' + device + '\n') + if options.mode != "none": + f.write('MODE=' + options.mode + '\n') + if options.mode_v6 != "none": + f.write('MODE_V6=' + options.mode_v6 + '\n') + if vlan != None: + f.write('VLAN=' + vlan + '\n') + if options.mode == 'static': + f.write('IP=' + options.ip + '\n') + f.write('NETMASK=' + options.netmask + '\n') + if options.gateway != '': + f.write('GATEWAY=' + options.gateway + '\n') + if options.mode_v6 == "static": + f.write('IPV6=' + options.ipv6 + '\n') + if options.gateway_v6 != '': + f.write('GATEWAY_V6=' + options.gateway_v6 + '\n') + if is_static and options.dns != '': + f.write('DNS=' + options.dns + '\n') + finally: + f.flush() + os.fsync(f.fileno()) + f.close() + + # Reset the domain 0 network interface naming configuration + # back to a fresh-install state for the currently-installed + # hardware. 
+ os.system("/etc/sysconfig/network-scripts/interface-rename.py --reset-to-install") + # Reboot + os.system("mount -o remount,rw / && reboot -f") From a39d65efaf0658b6c5af56cde81c064650bbfbaf Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 9 Jul 2024 12:00:00 +0200 Subject: [PATCH 167/341] CP-49906: Move scripts/plugins/extauth-hook-AD.py to python3/plugins Signed-off-by: Ashwinh Co-authored-by: Bernhard Kaindl --- python3/Makefile | 3 ++- {scripts => python3}/plugins/extauth-hook | 0 {scripts => python3}/plugins/extauth-hook-AD.py | 12 ++++++------ {scripts => python3}/plugins/extauth_hook_ad.py | 0 {scripts => python3}/plugins/test_extauth_hook_AD.py | 0 scripts/Makefile | 2 -- 6 files changed, 8 insertions(+), 9 deletions(-) rename {scripts => python3}/plugins/extauth-hook (100%) rename {scripts => python3}/plugins/extauth-hook-AD.py (99%) rename {scripts => python3}/plugins/extauth_hook_ad.py (100%) rename {scripts => python3}/plugins/test_extauth_hook_AD.py (100%) diff --git a/python3/Makefile b/python3/Makefile index 15e0a27b57a..75416a8d7f9 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -15,7 +15,6 @@ install: $(IPROG) -d $(DESTDIR)/usr/lib/systemd/system $(IPROG) -d $(DESTDIR)$(EXTENSIONDIR) - $(IDATA) packages/inventory.py $(DESTDIR)$(SITE3_DIR)/ $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ @@ -39,6 +38,8 @@ install: $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/echo.py $(DESTDIR)$(PLUGINDIR)/echo $(IPROG) plugins/openvswitch-config-update $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) + $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) $(IPROG) perfmon/perfmon $(DESTDIR)$(PLUGINDIR) $(IDATA) perfmon/perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service diff --git a/scripts/plugins/extauth-hook b/python3/plugins/extauth-hook similarity index 100% rename from scripts/plugins/extauth-hook rename to python3/plugins/extauth-hook diff --git 
a/scripts/plugins/extauth-hook-AD.py b/python3/plugins/extauth-hook-AD.py similarity index 99% rename from scripts/plugins/extauth-hook-AD.py rename to python3/plugins/extauth-hook-AD.py index bd2c349d4ba..0123461749c 100755 --- a/scripts/plugins/extauth-hook-AD.py +++ b/python3/plugins/extauth-hook-AD.py @@ -2,6 +2,12 @@ # # extauth-hook-AD.py # +# This plugin manages the following configuration files for external authentication +# - /etc/nsswitch.conf +# - /etc/pam.d/sshd +# - /etc/pam.d/hcp_users +# - /etc/ssh/ssh_config +# # This module can be called directly as a plugin. It handles # Active Directory being enabled or disabled as the hosts external_auth_type, # or subjects being added or removed while AD is the external_auth_type, @@ -21,12 +27,6 @@ import XenAPIPlugin -# this plugin manage following configuration files for external auth -# - /etc/nsswitch.conf -# - /etc/pam.d/sshd -# - /etc/pam.d/hcp_users -# - /etc/ssh/ssh_config - # pylint: disable=too-few-public-methods # pytype: disable=ignored-abstractmethod diff --git a/scripts/plugins/extauth_hook_ad.py b/python3/plugins/extauth_hook_ad.py similarity index 100% rename from scripts/plugins/extauth_hook_ad.py rename to python3/plugins/extauth_hook_ad.py diff --git a/scripts/plugins/test_extauth_hook_AD.py b/python3/plugins/test_extauth_hook_AD.py similarity index 100% rename from scripts/plugins/test_extauth_hook_AD.py rename to python3/plugins/test_extauth_hook_AD.py diff --git a/scripts/Makefile b/scripts/Makefile index 8d3196ceece..5d769d8778d 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -117,8 +117,6 @@ install: mkdir -p $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) mkdir -p $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/extauth-hook $(DESTDIR)$(PLUGINDIR) - $(IPROG) plugins/extauth-hook-AD.py $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/firewall-port $(DESTDIR)$(PLUGINDIR) mkdir -p $(DESTDIR)$(HOOKSDIR)/host-post-declare-dead $(IPROG) 10resetvdis 
$(DESTDIR)$(HOOKSDIR)/host-post-declare-dead From c8a1b5e4435a099edcbbffe169f57c2f520c40a5 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 9 Jul 2024 12:00:00 +0200 Subject: [PATCH 168/341] scripts/restore-sr-metadata.py: isort, fix pyright and pylint Signed-off-by: Bernhard Kaindl --- scripts/restore-sr-metadata.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/scripts/restore-sr-metadata.py b/scripts/restore-sr-metadata.py index 21214fef3c5..4bbb9fe55af 100644 --- a/scripts/restore-sr-metadata.py +++ b/scripts/restore-sr-metadata.py @@ -4,19 +4,19 @@ import atexit import contextlib -import XenAPI -import os, sys, time import getopt +import io +import sys from xml.dom.minidom import parse # pytype: disable=pyi-error -import codecs -sys.stdout = codecs.getwriter("utf-8")(sys.stdout) -sys.stderr = codecs.getwriter("utf-8")(sys.stderr) +import XenAPI + +sys.stdout = io.open(sys.stdout.fileno(), "w", encoding="utf-8") +sys.stderr = io.open(sys.stderr.fileno(), "w", encoding="utf-8") def usage(): print("%s -f -u " % sys.argv[0], file=sys.stderr) - sys.exit(1) def main(argv): session = XenAPI.xapi_local() @@ -34,6 +34,7 @@ def logout(): except getopt.GetoptError as err: print(str(err)) usage() + sys.exit(1) infile = None sruuid = None @@ -45,6 +46,7 @@ def logout(): if infile == None: usage() + sys.exit(1) try: doc = parse(infile) @@ -93,10 +95,12 @@ def logout(): session.xenapi.VDI.set_name_description(vdiref, vdi_descr) print(" Description: %s" % vdi_descr) except: - print("Error setting VDI data for: %s (%s)" % (vdi_uuid, name_label), file=sys.stderr) + print( + "Error setting VDI data for: %s (%s)" % (vdi_uuid, name_label), + file=sys.stderr, + ) continue + if __name__ == "__main__": main(sys.argv[1:]) - - From deb173135a181a3a99ac61ffd55dd8fdfa1595e8 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 9 Jul 2024 12:00:00 +0200 Subject: [PATCH 169/341] CP-49906: extauth-hook-AD.py: Fix remaining pylint warnings 
Signed-off-by: Bernhard Kaindl --- python3/plugins/extauth-hook-AD.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/python3/plugins/extauth-hook-AD.py b/python3/plugins/extauth-hook-AD.py index 0123461749c..3a7b14f7959 100755 --- a/python3/plugins/extauth-hook-AD.py +++ b/python3/plugins/extauth-hook-AD.py @@ -44,7 +44,7 @@ def setup_logger(): log = logging.getLogger() if not os.path.exists(addr): - log.warning("{} not available, logs are not redirected".format(addr)) + log.warning("%s not available, logs are not redirected", addr) return # Send to syslog local5, which will be redirected to xapi log /var/log/xensource.log @@ -92,7 +92,7 @@ def __init__(self, path, session, args, ad_enabled=True, load_existing=True, fil self._ad_enabled = ad_enabled self._file_mode = file_mode if load_existing and os.path.exists(self._file_path): - with open(self._file_path, 'r') as file: + with open(self._file_path, "r", encoding="utf-8") as file: lines = file.readlines() self._lines = [l.strip() for l in lines] @@ -237,9 +237,8 @@ def _add_subject(self, subject_rec): def _install(self): if self._ad_enabled: super(DynamicPam, self)._install() - else: - if os.path.exists(self._file_path): - os.remove(self._file_path) + elif os.path.exists(self._file_path): + os.remove(self._file_path) class UsersList(DynamicPam): @@ -261,7 +260,7 @@ def _add_upn(self, subject_rec): if self._backend == ADBackend.BD_PBIS: # PBIS convert domain to UPPER case, we revert it back domain = domain.lower() - self._lines.append(u"{}{}{}".format(user, sep, domain)) + self._lines.append("{}{}{}".format(user, sep, domain)) except KeyError: logger.info("subject does not have upn %s", subject_rec) except ValueError: @@ -362,7 +361,7 @@ def _apply_value(self, key, value): if self._is_special_line(key): line = value else: # normal line, construct the key value pair - sep = self._sep if self._sep else " " + sep = self._sep or " " line = "{}{}{}".format(key, sep, value) 
self._lines.append(line) From fb98229e86250d6add9cf75e65ad74bdc4726877 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Tue, 9 Jul 2024 15:13:00 +0000 Subject: [PATCH 170/341] CP-49900: Removed templates folder from python3/ - Removed templates debian and debug from Makefile Signed-off-by: Ashwinh --- python3/Makefile | 3 - python3/templates/debian | 171 --------------------------------------- python3/templates/debug | 7 -- 3 files changed, 181 deletions(-) delete mode 100644 python3/templates/debian delete mode 100755 python3/templates/debug diff --git a/python3/Makefile b/python3/Makefile index 15e0a27b57a..034a472ec1c 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -44,9 +44,6 @@ install: $(IDATA) perfmon/perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service $(IPROG) perfmon/sysconfig-perfmon $(DESTDIR)/etc/sysconfig/perfmon -# templates - $(IPROG) templates/debian $(DESTDIR)$(OPTDIR)/packages/post-install-scripts/debian-etch - $(IPROG) templates/debug $(DESTDIR)$(OPTDIR)/packages/post-install-scripts # poweron $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py diff --git a/python3/templates/debian b/python3/templates/debian deleted file mode 100644 index 4e9b12a8714..00000000000 --- a/python3/templates/debian +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2005-2007 XenSource, Inc - -# Code ripped out of 'xgt' script for now -from __future__ import print_function - -import os -import signal -import socket -import sys - -import commands -import httplib -import urllib2 -import xmlrpclib - -verbose = True - - -##### begin hack. 
Provide xmlrpc over UNIX domain socket (cut+pasted from eliloader): -class UDSHTTPConnection(httplib.HTTPConnection): - """Stupid hacked up HTTPConnection subclass to allow HTTP over Unix domain - sockets.""" - - def connect(self): - path = self.host.replace("_", "/") - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.sock.connect(path) - - -class UDSHTTP(httplib.HTTP): - _connection_class = UDSHTTPConnection - - -class UDSTransport(xmlrpclib.Transport): - def make_connection(self, host): - return UDSHTTP(host) - - -def xapi_local(): - return xmlrpclib.Server("http://_var_xapi_xapi/", transport=UDSTransport()) - - -##### end hack. - - -class CommandException(Exception): - pass - - -def run(cmd, *args): - debug("+ " + cmd % args) - (ret, out) = commands.getstatusoutput(cmd % args) - if verbose: - try: - for line in out.split("\n"): - log("| " + line) - except TypeError as e: - pass - if ret != 0: - debug("run - command %s failed with %d", cmd, ret) - raise CommandException(out) - return out - - -def log(fmt, *args): - print(fmt % args) - - -def debug(msg, *args): - if verbose: - print(msg % args) - - -def create_partition(lvpath): - # 1. write a partition table: - pipe = os.popen("/sbin/fdisk %s" % lvpath, "w") - - pipe.write("n\n") # new partition - pipe.write("p\n") # primary - pipe.write("1\n") # 1st partition - pipe.write("\n") # default start cylinder - pipe.write("\n") # size: as big as image - pipe.write("w\n") # write partition table - - # XXX we must ignore certain errors here as fdisk will - # sometimes return non-zero signalling error conditions - # we don't care about. Should fix to detect these cases - # specifically. - rc = pipe.close() - if rc == None: - rc = 0 - log("fdisk exited with rc %d (some non-zero exits can be ignored safely)." 
% rc) - - -def map_partitions(lvpath): - run("/sbin/kpartx -a %s", lvpath) - ps = [] - for line in run("/sbin/kpartx -l %s" % lvpath).split("\n"): - ps.append("/dev/mapper/" + line.split()[0]) - return ps - - -def unmap_partitions(lvpath): - run("/sbin/kpartx -d %s", lvpath) - - -def umount(mountpoint): - run("umount -l %s", mountpoint) - - -if __name__ == "__main__": - # os.setpgrp() - xvda = os.getenv("xvda") - xvdb = os.getenv("xvdb") - debug("Guest's xvda is on %s" % xvda) - debug("Guest's xvdb is on %s" % xvdb) - if xvda == None or xvdb == None: - raise ValueError ("Need to pass in device names for xvda and xvdb through the environment") - - vm = os.getenv("vm") - - server = xapi_local() - try: - session_id = server.session.login_with_password( - "", "", "1.0", "xen-api-scripts-debian" - )["Value"] - uuid = server.VM.get_uuid(session_id, vm)["Value"] - mountpoint = "/tmp/installer/%s" % (uuid) - finally: - server.session.logout(session_id) - - def sighandler(signum, frame): - umount(mountpoint) - os.killpg(0, signal.SIGKILL) - exit(1) - - signal.signal(signal.SIGTERM, sighandler) - - create_partition(xvda) - create_partition(xvdb) - - try: - xvda_parts = map_partitions(xvda) - - run("/sbin/mkfs.ext3 %s", xvda_parts[0]) - - xgt = "@SHAREDIR@/packages/xgt/%s.xgt" % os.path.basename(sys.argv[0]) - - run("/bin/mkdir -p %s", mountpoint) - try: - run("/bin/mount %s %s", xvda_parts[0], mountpoint) - run("/usr/bin/unzip -p %s root.tar.bz2 | tar -C %s -jx", xgt, mountpoint) - finally: - run("/bin/umount %s", mountpoint) - run("/bin/rmdir %s", mountpoint) - run("/usr/bin/unzip -p %s swap.img | dd of=%s oflag=direct bs=1M", xgt, xvdb) - - try: - session_id = server.session.login_with_password( - "", "", "1.0", "xen-api-scripts-debian" - )["Value"] - vbds = server.VM.get_VBDs(session_id, vm)["Value"] - for i in vbds: - dev = server.VBD.get_userdevice(session_id, i)["Value"] - if dev == "0": - server.VBD.set_bootable(session_id, i, True) - finally: - 
server.session.logout(session_id) - finally: - unmap_partitions(xvda) diff --git a/python3/templates/debug b/python3/templates/debug deleted file mode 100755 index 85656ebf2d9..00000000000 --- a/python3/templates/debug +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh - - -# Script should be passed a session_id, VM reference and set of block -# devices via the environment - -set > /tmp/debug-install-script \ No newline at end of file From 4c023d354a34ca0cfc09f3a8b60e85d7f2236e88 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 10 Jul 2024 12:00:00 +0200 Subject: [PATCH 171/341] XenAPIPlugin.py: Remove the superflous catch & raise of SystemExit In `XenAPIPlugin.py`'s `dispatch()` function, SystemExit does not need to be caught and raised because both other exceptions are subclasses of Exception: By design, SystemExit is a subclass of BaseException and because we are not catching BaseException and also not use a bare `except:` here, we can cleanup catching and re-raising `SystemExit()` here. Reference: https://docs.python.org/3/library/exceptions.html#SystemExit Signed-off-by: Bernhard Kaindl --- scripts/examples/python/XenAPIPlugin.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/examples/python/XenAPIPlugin.py b/scripts/examples/python/XenAPIPlugin.py index 1d657f065d1..82f1f2f8531 100644 --- a/scripts/examples/python/XenAPIPlugin.py +++ b/scripts/examples/python/XenAPIPlugin.py @@ -44,9 +44,6 @@ def dispatch(fn_table): try: result = fn_table[methodname](x, args) print(success_message(result)) - except SystemExit: - # SystemExit should not be caught, as it is handled elsewhere in the plugin system. 
- raise except Failure as e: print(failure_message(e.params)) except Exception as e: From 37e615e30f465a150827e9498814c3dfda80b6b8 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 11 Jul 2024 10:59:51 +0000 Subject: [PATCH 172/341] CP-50100: Moved backup-sr-metadata.py from scripts/ to python3/libexec directory - Modified python3/Makefile to include this change. - Removed backup-sr-metadata.py from scripts/Makefile Signed-off-by: Ashwinh --- python3/Makefile | 1 + {scripts => python3/libexec}/backup-sr-metadata.py | 0 scripts/Makefile | 1 - 3 files changed, 1 insertion(+), 1 deletion(-) rename {scripts => python3/libexec}/backup-sr-metadata.py (100%) diff --git a/python3/Makefile b/python3/Makefile index 75416a8d7f9..715257cdc8d 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -26,6 +26,7 @@ install: $(IPROG) libexec/probe-device-for-file $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/print-custom-templates $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/mail-alarm $(DESTDIR)$(LIBEXECDIR) + $(IPROG) libexec/backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/backup-sr-metadata.py b/python3/libexec/backup-sr-metadata.py similarity index 100% rename from scripts/backup-sr-metadata.py rename to python3/libexec/backup-sr-metadata.py diff --git a/scripts/Makefile b/scripts/Makefile index 5d769d8778d..cd32cd2f0df 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -110,7 +110,6 @@ install: $(IPROG) host-bugreport-upload $(DESTDIR)$(LIBEXECDIR)/host-bugreport-upload $(IPROG) xe-backup-metadata $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-restore-metadata $(DESTDIR)$(OPTDIR)/bin - $(IPROG) backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) From c1ea5b3ea3494ddd32d74f00537afec4a55186a7 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 11 Jul 2024 12:27:55 +0000 
Subject: [PATCH 173/341] CP-50099: Moved restore-sr-metadata.py from scripts/ to python3/libexec directory - Modified python3 Makefile to include this change - Removed restore-sr-metadata.py from scripts/Makefile - Fixed bare-except exception pylint issue Signed-off-by: Ashwinh --- python3/Makefile | 2 +- {scripts => python3/libexec}/restore-sr-metadata.py | 0 scripts/Makefile | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) rename {scripts => python3/libexec}/restore-sr-metadata.py (100%) diff --git a/python3/Makefile b/python3/Makefile index 715257cdc8d..5a427961371 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -27,7 +27,7 @@ install: $(IPROG) libexec/print-custom-templates $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/mail-alarm $(DESTDIR)$(LIBEXECDIR) $(IPROG) libexec/backup-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) - + $(IPROG) libexec/restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/restore-sr-metadata.py b/python3/libexec/restore-sr-metadata.py similarity index 100% rename from scripts/restore-sr-metadata.py rename to python3/libexec/restore-sr-metadata.py diff --git a/scripts/Makefile b/scripts/Makefile index cd32cd2f0df..94dfc412718 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -110,7 +110,6 @@ install: $(IPROG) host-bugreport-upload $(DESTDIR)$(LIBEXECDIR)/host-bugreport-upload $(IPROG) xe-backup-metadata $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-restore-metadata $(DESTDIR)$(OPTDIR)/bin - $(IPROG) restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(EXTENSIONDIR) From 8f972d80ac528f78acdd2635cf61a674a47d7261 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Wed, 10 Jul 2024 12:52:29 +0000 Subject: [PATCH 174/341] CP-49919: mv scripts/extensions/pool_update.precheck to python3/extensions Original (code supplied) by 
Bernhard Kaindl: - Declare missing methods to python3/stubs/XenAPI.py for pyright - Initialize variables to fix pyright:reportPossiblyUnboundVariable - Applied isort Signed-off-by: Ashwinh Signed-off-by: Bernhard Kaindl --- python3/Makefile | 2 +- .../extensions/pool_update.precheck | 29 ++++++++++--------- python3/stubs/XenAPI.pyi | 2 ++ scripts/Makefile | 1 - 4 files changed, 19 insertions(+), 15 deletions(-) rename {scripts => python3}/extensions/pool_update.precheck (98%) diff --git a/python3/Makefile b/python3/Makefile index 75416a8d7f9..c9b6fe73f4c 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -32,7 +32,7 @@ install: $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) - + $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) $(IPROG) plugins/disk-space $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/install-supp-pack $(DESTDIR)$(PLUGINDIR) diff --git a/scripts/extensions/pool_update.precheck b/python3/extensions/pool_update.precheck similarity index 98% rename from scripts/extensions/pool_update.precheck rename to python3/extensions/pool_update.precheck index 161fad13740..1004ae5c736 100755 --- a/scripts/extensions/pool_update.precheck +++ b/python3/extensions/pool_update.precheck @@ -1,23 +1,23 @@ #!/usr/bin/env python3 -import xmlrpc.client -import sys -import XenAPI -import json -import urllib.request, urllib.error, urllib.parse -import xml.dom.minidom -import traceback -import subprocess -import os +import configparser import errno -import re -import shutil import io -import configparser import logging -import xcp.logger +import os +import re +import shutil +import subprocess +import sys +import urllib.error +import urllib.parse +import urllib.request +import xml.dom.minidom +import xmlrpc.client +import xcp.logger +import XenAPI TMP_DIR = '/tmp/' UPDATE_DIR = '/var/update/' @@ 
-234,6 +234,9 @@ if __name__ == '__main__': update_vdi_valid = False session = None + update_package = None + update = None + yum_conf_file = "" try: session = XenAPI.xapi_local() session.xenapi.login_with_password('root', '', '', 'Pool_update') diff --git a/python3/stubs/XenAPI.pyi b/python3/stubs/XenAPI.pyi index bde962b0556..ede1e13d5f5 100644 --- a/python3/stubs/XenAPI.pyi +++ b/python3/stubs/XenAPI.pyi @@ -50,6 +50,8 @@ class _Dispatcher: VDI: Incomplete PBD: Incomplete pool: Incomplete + host: Incomplete + pool_update: Incomplete VM: Incomplete diff --git a/scripts/Makefile b/scripts/Makefile index 5d769d8778d..4aef6854d7d 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -115,7 +115,6 @@ install: $(IPROG) backup-metadata-cron $(DESTDIR)$(LIBEXECDIR) $(IPROG) pbis-force-domain-leave $(DESTDIR)$(LIBEXECDIR) mkdir -p $(DESTDIR)$(EXTENSIONDIR) - $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) mkdir -p $(DESTDIR)$(PLUGINDIR) $(IPROG) plugins/firewall-port $(DESTDIR)$(PLUGINDIR) mkdir -p $(DESTDIR)$(HOOKSDIR)/host-post-declare-dead From dd7d37a5656d9f5dddde0ab6f6b2fe3c902658db Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 11 Jul 2024 12:00:00 +0200 Subject: [PATCH 175/341] python3/plugins/test_extauth_hook_AD.py: Assert the current bug Signed-off-by: Bernhard Kaindl --- python3/plugins/test_extauth_hook_AD.py | 31 +++++++++++++++++-------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/python3/plugins/test_extauth_hook_AD.py b/python3/plugins/test_extauth_hook_AD.py index 1960072f3f1..d3bee7670ad 100644 --- a/python3/plugins/test_extauth_hook_AD.py +++ b/python3/plugins/test_extauth_hook_AD.py @@ -1,13 +1,12 @@ """ Test module for extauth_hook_ad """ -#pylint: disable=invalid-name -import sys + +import logging import os +import sys from unittest import TestCase -from mock import MagicMock, patch - -import pytest +from unittest.mock import MagicMock, patch # mock modules to avoid dependencies 
sys.modules["XenAPIPlugin"] = MagicMock() @@ -15,11 +14,23 @@ # pylint: disable=wrong-import-position # Import must after mock modules from extauth_hook_ad import StaticSSHPam, NssConfig, SshdConfig, UsersList, GroupsList - - -if sys.version_info < (3, ): # pragma: no cover - pytest.skip(allow_module_level=True) - +from extauth_hook_ad import run_cmd + +def test_run_cmd(caplog): + """Assert the current buggy behavior of the run_cmd function after py3 migration""" + cmd = ["echo", " Hello World! "] + + # Call the function under test, check the return value and capture the log message + with caplog.at_level(logging.DEBUG): + # Bug in the current code, the result is a byte string: + assert run_cmd(cmd) == cmd[1].strip().encode() + + # Bug in the current code after not fully tested py3 migration: + # The logged message contains a byte string that is not stripped: + assert caplog.records[0].message == "%s -> b' Hello World! \\n'" % (cmd) + # Test the case where the command fails: + assert run_cmd(["bad command"]) is None + assert caplog.records[1].message == "Failed to run command ['bad command']" def line_exists_in_config(lines, line): """ From 3317717508a11c8c1d6b4963561a5afe949212c5 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 11 Jul 2024 12:00:00 +0200 Subject: [PATCH 176/341] python3/plugins/extauth-hook-AD.py: Fix logging of run_cmd() Signed-off-by: Bernhard Kaindl --- python3/plugins/extauth-hook-AD.py | 17 +++++++---------- python3/plugins/test_extauth_hook_AD.py | 9 ++++----- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/python3/plugins/extauth-hook-AD.py b/python3/plugins/extauth-hook-AD.py index 3a7b14f7959..a67d35b10b8 100755 --- a/python3/plugins/extauth-hook-AD.py +++ b/python3/plugins/extauth-hook-AD.py @@ -61,17 +61,14 @@ def setup_logger(): logger = logging.getLogger(__name__) -def run_cmd(cmd, log_cmd=True): - """Helper function to run command""" +def run_cmd(command: "list[str]"): + """Helper function to run a command 
and log the output""" try: - result = subprocess.check_output(cmd) - if log_cmd: - msg = "{} -> {}".format(cmd, result) - logger.debug(msg) - return result.strip() - except Exception: # pylint: disable=broad-except - logger.exception("Failed to run command %s", cmd) - return None + output = subprocess.check_output(command, universal_newlines=True) + logger.debug("%s -> %s", command, output.strip()) + + except OSError: + logger.exception("Failed to run command %s", command) class ADBackend(Enum): diff --git a/python3/plugins/test_extauth_hook_AD.py b/python3/plugins/test_extauth_hook_AD.py index d3bee7670ad..616884101c6 100644 --- a/python3/plugins/test_extauth_hook_AD.py +++ b/python3/plugins/test_extauth_hook_AD.py @@ -22,12 +22,11 @@ def test_run_cmd(caplog): # Call the function under test, check the return value and capture the log message with caplog.at_level(logging.DEBUG): - # Bug in the current code, the result is a byte string: - assert run_cmd(cmd) == cmd[1].strip().encode() + assert run_cmd(cmd) is None # The return value is None (not used in the code) + + # Assert the log message + assert caplog.records[0].message == "%s -> Hello World!" % (cmd) - # Bug in the current code after not fully tested py3 migration: - # The logged message contains a byte string that is not stripped: - assert caplog.records[0].message == "%s -> b' Hello World! 
\\n'" % (cmd) # Test the case where the command fails: assert run_cmd(["bad command"]) is None assert caplog.records[1].message == "Failed to run command ['bad command']" From 959721e36b4e4a3ac7e52e807cc6e739315efb44 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Fri, 12 Jul 2024 09:00:00 +0200 Subject: [PATCH 177/341] mypy: Fix and improve the config to make it more usable Signed-off-by: Bernhard Kaindl --- pyproject.toml | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2749d69956f..588e8249222 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ # https://packaging.python.org/en/latest/specifications/pyproject-toml/ [project] name = "xen-api" -requires-python = ">=3.6.*" +requires-python = ">=3.6.0" license = {file = "LICENSE"} keywords = ["xen-project", "Xen", "hypervisor", "libraries"] maintainers = [ @@ -119,10 +119,19 @@ ensure_newline_before_comments = false # PYTHONPATH="scripts/examples/python:.:scripts:scripts/plugins:scripts/examples" files = [ "python3", - "scripts/usb_reset.py", + "scripts/examples/python", +] +exclude = [ + "python3/packages", + "python3/stubs", + "python3/tests", ] pretty = true +mypy_path = "python3/packages:python3/stubs:scripts/examples/python" error_summary = true +# default_return = false sets the default return type of functions to 'Any'. 
+# It makes mypy less noisy on untyped code makes it more usable now: +default_return = false strict_equality = true show_error_codes = true show_error_context = true @@ -138,7 +147,16 @@ disallow_any_explicit = false disallow_any_generics = true disallow_any_unimported = true disallow_subclassing_any = true -disable_error_code = ["import-untyped"] # XenAPI is not typed yet +disable_error_code = [ + "explicit-override", + "misc", + "no-any-decorated", + "no-any-expr", + "no-untyped-call", + "no-untyped-def", + "no-untyped-usage", + "import-untyped", # XenAPI is not typed yet +] [[tool.mypy.overrides]] From df8986839c57d4d920469f6f19f4109b2fb8649a Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Thu, 11 Jul 2024 13:22:39 +0000 Subject: [PATCH 178/341] CP-49928: Moved static-vdis from scripts/ to python3/bin directory - Modified python3/Makefile to include this change - Removed static-vdis from scripts/Makefile - Modified test_static_vdis.py to include new location of the static-vdis Signed-off-by: Ashwinh --- python3/Makefile | 1 + {scripts => python3/bin}/static-vdis | 0 python3/tests/test_static_vdis.py | 2 +- scripts/Makefile | 1 - 4 files changed, 2 insertions(+), 2 deletions(-) rename {scripts => python3/bin}/static-vdis (100%) diff --git a/python3/Makefile b/python3/Makefile index 8df2788f583..81783bcd6c0 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -32,6 +32,7 @@ install: $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/static-vdis $(DESTDIR)$(OPTDIR)/bin $(IPROG) extensions/pool_update.apply $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/pool_update.precheck $(DESTDIR)$(EXTENSIONDIR) $(IPROG) extensions/Test.test $(DESTDIR)$(EXTENSIONDIR) diff --git a/scripts/static-vdis b/python3/bin/static-vdis similarity index 100% rename from scripts/static-vdis rename to python3/bin/static-vdis diff --git a/python3/tests/test_static_vdis.py 
b/python3/tests/test_static_vdis.py index 1b7efc0bcf0..ef4e24d7f31 100644 --- a/python3/tests/test_static_vdis.py +++ b/python3/tests/test_static_vdis.py @@ -16,7 +16,7 @@ def static_vdis() -> ModuleType: """Test fixture to return the static-vdis module, mocked to avoid dependencies.""" with mocked_modules("XenAPI", "inventory"): - return import_file_as_module("scripts/static-vdis") + return import_file_as_module("python3/bin/static-vdis") # Hide pylint warnings for redefined-outer-name from using the static_vdis fixture: diff --git a/scripts/Makefile b/scripts/Makefile index 198141f594a..bbaa23db002 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -95,7 +95,6 @@ install: $(IPROG) xe-edit-bootloader $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-get-network-backend $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-enable-all-plugin-metrics $(DESTDIR)$(OPTDIR)/bin - $(IPROG) static-vdis $(DESTDIR)$(OPTDIR)/bin $(IPROG) with-vdi $(DESTDIR)$(OPTDIR)/debug $(IPROG) import-update-key $(DESTDIR)$(OPTDIR)/debug $(IPROG) pool.conf $(DESTDIR)$(ETCXENDIR) From 91b646d0afb7936057dc09009bb8b9f39f9bdcf0 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Fri, 12 Jul 2024 12:00:00 +0200 Subject: [PATCH 179/341] scripts/generate-iscsi-iqn: Fix inline Python to work in Py3 Signed-off-by: Bernhard Kaindl --- scripts/generate-iscsi-iqn | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/generate-iscsi-iqn b/scripts/generate-iscsi-iqn index 0d662b0441c..882a4c7f6fd 100755 --- a/scripts/generate-iscsi-iqn +++ b/scripts/generate-iscsi-iqn @@ -21,7 +21,8 @@ def f(x): tmp = x.rstrip().split(".") tmp.reverse() return ".".join(tmp) -if __name__ == "__main__": print f(sys.argv[1]) + +if __name__ == "__main__": print(f(sys.argv[1])) ' geniqn() { From 889b1bf9a5a1bfd76f0f8d12b4de8315e3953338 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Fri, 12 Jul 2024 12:00:00 +0200 Subject: [PATCH 180/341] rm ocaml/idl/ocaml_backend/python: remove obsolete example scripts These example 
scripts were imported in 2009 and are obsoleted by samples like: https://github.com/xapi-project/xen-api-sdk/blob/master/python/samples/powercycle.py - The sample xen-api-sdk/python/samples/powercycle.py is much better. - They are not installed and not otherwise mentioned in the repository. Signed-off-by: Bernhard Kaindl --- ocaml/idl/ocaml_backend/python/list_vms.py | 10 ----- ocaml/idl/ocaml_backend/python/pause_vm.py | 6 --- ocaml/idl/ocaml_backend/python/test_client.py | 44 ------------------- ocaml/idl/ocaml_backend/python/unpause_vm.py | 6 --- pyproject.toml | 2 - 5 files changed, 68 deletions(-) delete mode 100755 ocaml/idl/ocaml_backend/python/list_vms.py delete mode 100755 ocaml/idl/ocaml_backend/python/pause_vm.py delete mode 100755 ocaml/idl/ocaml_backend/python/test_client.py delete mode 100755 ocaml/idl/ocaml_backend/python/unpause_vm.py diff --git a/ocaml/idl/ocaml_backend/python/list_vms.py b/ocaml/idl/ocaml_backend/python/list_vms.py deleted file mode 100755 index 0d7a75313cb..00000000000 --- a/ocaml/idl/ocaml_backend/python/list_vms.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib -server = xmlrpclib.Server("http://melton:8086"); -session = server.session.login_with_password("root", "xenroot", "1.0", "xen-api-list-vms.py")['Value'] -print session -vms = server.VM.get_all(session)['Value'] -print vms -#for vm in vms: -# print vm,server.VM.get_kernel__kernel(session, vm) diff --git a/ocaml/idl/ocaml_backend/python/pause_vm.py b/ocaml/idl/ocaml_backend/python/pause_vm.py deleted file mode 100755 index 2795496e1cd..00000000000 --- a/ocaml/idl/ocaml_backend/python/pause_vm.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib -server = xmlrpclib.Server("http://localhost:8086"); -session = server.Session.do_login_with_password("user", "passwd", "1.0", "xen-api-pause-vm.py")['Value'] -server.VM.do_pause(session, '7366a41a-e50e-b891-fa0c-ca5b4d2e3f1c') diff --git a/ocaml/idl/ocaml_backend/python/test_client.py 
b/ocaml/idl/ocaml_backend/python/test_client.py deleted file mode 100755 index 05888c97db7..00000000000 --- a/ocaml/idl/ocaml_backend/python/test_client.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python - -import getopt, sys, xmlrpclib - -url = "http://dhcp108:70000" #default -parsed = getopt.getopt(sys.argv[1:], "u:url") -if len(parsed[0]) == 1: - url = parsed[0][0][1] - -# Create an object to represent our server. -server = xmlrpclib.Server(url); - -# Call the server and get our result. -print "Logging in... ", -session = server.Session.do_login_with_password("user", "passwd", "1.0", "xen-api-test-client.py") -print "OK" -print "Session ID: \""+session+"\"" -vm_list = server.VM.do_list(session) - -print "VM list = " + repr(vm_list) - -for vm in vm_list: - print "VM ", vm, " in state: ", server.VM.get_power_state(session, vm) - -first_vm = vm_list[0] -other = server.VM.get_otherConfig(session, first_vm) -print repr(other) - - -#state = server.VM.get_power_state(session, first_vm) -#if state == "Halted": -# print "Starting first VM... ", -# server.VM.do_start(session, first_vm, 1==0) -#elif state == "Suspended": -# print "Restoring first VM..." -# server.VM.do_unhibernate(session, first_vm, 1==0) -#elif state == "Running": -# print "Suspending first VM... ", -# server.VM.do_hibernate(session, first_vm, 1==1) -#print "OK" - -print "Logging out... 
", -server.Session.do_logout(session) -print "OK" diff --git a/ocaml/idl/ocaml_backend/python/unpause_vm.py b/ocaml/idl/ocaml_backend/python/unpause_vm.py deleted file mode 100755 index 97d748e1dca..00000000000 --- a/ocaml/idl/ocaml_backend/python/unpause_vm.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -import xmlrpclib -server = xmlrpclib.Server("http://localhost:8086"); -session = server.Session.do_login_with_password("user", "passwd", "1.0", "xen-api-unpause-vm.py")['Value'] -server.VM.do_unpause(session, '7366a41a-e50e-b891-fa0c-ca5b4d2e3f1c') diff --git a/pyproject.toml b/pyproject.toml index 2749d69956f..555f1940203 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -287,8 +287,6 @@ inputs = [ # To be added later, # when converted to Python3-compatible syntax: - # "ocaml/message-switch/python", - # "ocaml/idl/ocaml_backend/python", # "ocaml/xapi-storage/python", ] disable = [ From 418450d7167590cd89ac9225c9320c636be2d3a8 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Fri, 12 Jul 2024 12:00:00 +0200 Subject: [PATCH 181/341] Update scripts/test_mail-alarm.py to test with Python3 and move it Signed-off-by: Bernhard Kaindl --- {scripts => python3/tests}/test_mail-alarm.py | 64 ++++--------------- 1 file changed, 14 insertions(+), 50 deletions(-) rename {scripts => python3/tests}/test_mail-alarm.py (92%) diff --git a/scripts/test_mail-alarm.py b/python3/tests/test_mail-alarm.py similarity index 92% rename from scripts/test_mail-alarm.py rename to python3/tests/test_mail-alarm.py index acd5f5f20a5..c1d225eeac2 100644 --- a/scripts/test_mail-alarm.py +++ b/python3/tests/test_mail-alarm.py @@ -2,26 +2,13 @@ # test_mail-alarm.py: uses unittest to test script "mail-alarm" # -import tempfile -import os -import shutil import sys import unittest -import mock -import pytest +from unittest import mock -if sys.version_info > (2, ): - pytest.skip(allow_module_level=True) - -def nottest(obj): - obj.__test__ = False - return obj - 
-sys.path.append("./scripts/examples/python") -sys.modules["xcp"] = mock.Mock() - -log_file_global = None +from python3.tests.import_helper import import_file_as_module, mocked_modules +log_strs = "" XML_MESSAGE_TEMPLATE = """ 63102OpaqueRef:46be74f4-3a26-31a8-a629-d52584fe6ed3{alarm}3{cls}2e00443d-ac29-4940-8433-a15dda1e8f8e20170516T16:30:00Z0d985f5e-6d91-3410-f853-040d0906a4b9{body}""" @@ -56,28 +43,16 @@ def get_alarm_xml(xmlalarm_str, xmlcls_str, xmlname_str, xmlbody_str): def log_err(err): - global log_file_global - with open(log_file_global, "a+") as fileh: - fileh.write("%s: %s\n" % (sys.argv[0], err)) + global log_strs # pylint: disable=global-statement + log_strs = log_strs + "%s: %s\n" % (sys.argv[0], err) + + +with mocked_modules("xcp"): + mailalarm = import_file_as_module("python3/libexec/mail-alarm") + mock_setup(mailalarm) class TestXapiMessage(unittest.TestCase): - def setUp(self): - global log_file_global - try: - self.work_dir = tempfile.mkdtemp(prefix="test-mail-alarm-") - log_file_global = os.path.join(self.work_dir, "user.log") - src_file = "./scripts/mail-alarm" - dst_file = os.path.join(self.work_dir, "mailalarm.py") - shutil.copyfile(src_file, dst_file) - sys.path.append(self.work_dir) - except: - raise - - def tearDown(self): - shutil.rmtree(self.work_dir, ignore_errors=True) - - @nottest def common_test_good_input( self, xmlalarm_str, @@ -87,11 +62,6 @@ def common_test_good_input( body_str, xmlbody_str=XML_BODY_COMMON, ): - import mailalarm - - # Emulate functions with Mock - mock_setup(mailalarm) - session = mock.Mock() tst_xml = get_alarm_xml(xmlalarm_str, xmlcls_str, xmlname_str, xmlbody_str) @@ -104,7 +74,6 @@ def common_test_good_input( self.assertIn(subject_str, mail_subject) self.assertIn(body_str, mail_body) - @nottest def common_test_bad_input( self, xmlalarm_str, @@ -114,12 +83,6 @@ def common_test_bad_input( subtitle_str, xmlbody_str=XML_BODY_COMMON, ): - global log_file_global - import mailalarm - - # Emulate functions with Mock 
- mock_setup(mailalarm) - session = mock.Mock() tst_xml = get_alarm_xml(xmlalarm_str, xmlcls_str, xmlname_str, xmlbody_str) @@ -128,9 +91,11 @@ def common_test_bad_input( mail_subject = obj_XapiMessage.generate_email_subject() mail_body = obj_XapiMessage.generate_email_body() + assert mail_subject and mail_body # They're tested by test_good_mail_language() - with open(log_file_global, "r") as fileh: - log_strs = fileh.read() + # Assert the logged error messages for the bad language pack that are + # recorded in `log_str` by `log_err()` when the language pack is not found + # by `generate_email_subject()` and `generate_email_body()`: self.assertIn("Read mail language pack error", log_strs) self.assertIn( @@ -146,7 +111,6 @@ def common_test_bad_input( log_strs, ) - os.remove(log_file_global) def test_good_mail_language(self): ## Test cpu_usage alarm From e6f9ff580c351d9cab8fe824540a58fd6ab794d0 Mon Sep 17 00:00:00 2001 From: Ashwinh Date: Mon, 15 Jul 2024 12:00:50 +0000 Subject: [PATCH 182/341] CP-49931: Move scripts/xe-reset-networking to python3/bin, fix pyright Fix pyright: - Rework unclean exception handling using a contextmanager. - Declare address and master in case of an Exception as well. - Fix warning on unsupported escape sequence using a raw string. 
- Removed python2 script check from pyproject.toml Signed-off-by: Bernhard Kaindl Signed-off-by: Ashwinh --- pyproject.toml | 13 ----- python3/Makefile | 1 + {scripts => python3/bin}/xe-reset-networking | 59 ++++++++++---------- scripts/Makefile | 1 - 4 files changed, 29 insertions(+), 45 deletions(-) rename {scripts => python3/bin}/xe-reset-networking (92%) diff --git a/pyproject.toml b/pyproject.toml index 2749d69956f..352014a78e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -271,19 +271,6 @@ inputs = [ # Python 3 "python3/", "ocaml/xcp-rrdd", - # Python2: These will generate warnings that need to be fixed: - "scripts/static-vdis", - "scripts/generate-iscsi-iqn", - "scripts/hatests", - "scripts/host-display", - "scripts/mail-alarm", - "scripts/print-custom-templates", - "scripts/probe-device-for-file", - "scripts/xe-reset-networking", - "scripts/xe-scsi-dev-map", - "scripts/examples/python", - "scripts/yum-plugins", - "scripts/*.py", # To be added later, # when converted to Python3-compatible syntax: diff --git a/python3/Makefile b/python3/Makefile index 81783bcd6c0..d33d89d81f3 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -30,6 +30,7 @@ install: $(IPROG) libexec/restore-sr-metadata.py $(DESTDIR)$(LIBEXECDIR) $(IPROG) bin/hfx_filename $(DESTDIR)$(OPTDIR)/bin + $(IPROG) bin/xe-reset-networking $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/perfmon $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/xe-scsi-dev-map $(DESTDIR)$(OPTDIR)/bin $(IPROG) bin/static-vdis $(DESTDIR)$(OPTDIR)/bin diff --git a/scripts/xe-reset-networking b/python3/bin/xe-reset-networking similarity index 92% rename from scripts/xe-reset-networking rename to python3/bin/xe-reset-networking index f26ce9fd7d4..c1e1d908261 100755 --- a/scripts/xe-reset-networking +++ b/python3/bin/xe-reset-networking @@ -14,18 +14,30 @@ GNU Lesser General Public License for more details. 
""" from __future__ import print_function -import sys import os -import time import re +import sys +from contextlib import contextmanager from optparse import OptionParser -#import XenAPI pool_conf = '@ETCXENDIR@/pool.conf' inventory_file = '@INVENTORY@' management_conf = '/etc/firstboot.d/data/management.conf' network_reset = '/tmp/network-reset' + +@contextmanager +def fsync_write(filename): + """Context manager that writes to a file and fsyncs it after writing.""" + + with open(filename, "w", encoding="utf-8") as file: + try: # Run the context, ignoring exceptions: + yield file + finally: + file.flush() # Flush the file buffer to the OS + os.fsync(file.fileno()) # Ask the OS to write the file to disk + + def read_dict_file(fname): f = open(fname, 'r') d = {} @@ -40,16 +52,15 @@ def read_inventory(): def read_management_conf(): return read_dict_file(management_conf) -def write_inventory(inventory): - f = open(inventory_file, 'w') - for k in inventory: - f.write(k + "='" + inventory[k] + "'\n") - f.flush() - os.fsync(f.fileno()) - f.close() + +def write_inventory(inventory_dict): + with fsync_write(inventory_file) as file: + for k in inventory_dict: + file.write(k + "='" + inventory_dict[k] + "'\n") + def valid_vlan(vlan): - if not re.match('^\d+$', vlan): + if not re.match(r"^\d+$", vlan): return False if int(vlan)<0 or int(vlan)>4094: return False @@ -88,8 +99,9 @@ if __name__ == "__main__": address = options.address finally: f.close() - except: - pass + except Exception: + master = None + address = "" # Get the management device from the firstboot data if not specified by the user if options.device == None: @@ -192,13 +204,8 @@ Type 'no' to cancel. 
# Update master's IP, if needed and given if master == False and options.address != None: print("Setting master's ip (" + address + ")...") - try: - f = open(pool_conf, 'w') + with fsync_write(pool_conf) as f: f.write('slave:' + address) - finally: - f.flush() - os.fsync(f.fileno()) - f.close() # Construct bridge name for management interface based on convention if device[:3] == 'eth': @@ -230,8 +237,7 @@ Type 'no' to cancel. # Rewrite firstboot management.conf file, which will be picked it by xcp-networkd on restart (if used) is_static = False - try: - f = open(management_conf, 'w') + with fsync_write(management_conf) as f: f.write("LABEL='" + device + "'\n") if options.mode != "none": f.write("MODE='" + options.mode + "'\n") @@ -252,14 +258,9 @@ Type 'no' to cancel. f.write("IPv6_GATEWAY='" + options.gateway_v6 + "'\n") if is_static and options.dns != '': f.write("DNS='" + options.dns + "'\n") - finally: - f.flush() - os.fsync(f.fileno()) - f.close() # Write trigger file for XAPI to continue the network reset on startup - try: - f = open(network_reset, 'w') + with fsync_write(network_reset) as f: f.write('DEVICE=' + device + '\n') if options.mode != "none": f.write('MODE=' + options.mode + '\n') @@ -278,10 +279,6 @@ Type 'no' to cancel. 
f.write('GATEWAY_V6=' + options.gateway_v6 + '\n') if is_static and options.dns != '': f.write('DNS=' + options.dns + '\n') - finally: - f.flush() - os.fsync(f.fileno()) - f.close() # Reset the domain 0 network interface naming configuration # back to a fresh-install state for the currently-installed diff --git a/scripts/Makefile b/scripts/Makefile index bbaa23db002..87302dca48f 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -89,7 +89,6 @@ install: mkdir -p $(DESTDIR)$(OPTDIR)/debug $(IPROG) debug_ha_query_liveset $(DESTDIR)$(OPTDIR)/debug $(IPROG) xe-mount-iso-sr $(DESTDIR)$(OPTDIR)/bin - $(IPROG) xe-reset-networking $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-toolstack-restart $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-xentrace $(DESTDIR)$(OPTDIR)/bin $(IPROG) xe-edit-bootloader $(DESTDIR)$(OPTDIR)/bin From 8056f9adf8ba494c4b9216fc1cafc2a687f70558 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 183/341] plugins/extauth-hook-AD.py: Fix 'Stray abstractmethod' pytype warning Signed-off-by: Bernhard Kaindl --- python3/plugins/extauth-hook-AD.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/python3/plugins/extauth-hook-AD.py b/python3/plugins/extauth-hook-AD.py index a67d35b10b8..41f982fbc77 100755 --- a/python3/plugins/extauth-hook-AD.py +++ b/python3/plugins/extauth-hook-AD.py @@ -28,7 +28,6 @@ # pylint: disable=too-few-public-methods -# pytype: disable=ignored-abstractmethod HCP_USERS = "/etc/security/hcp_ad_users.conf" @@ -77,7 +76,7 @@ class ADBackend(Enum): BD_WINBIND = 1 -class ADConfig(): +class ADConfig(abc.ABC): """Base class for AD configuration""" def __init__(self, path, session, args, ad_enabled=True, load_existing=True, file_mode=0o644): @@ -103,8 +102,7 @@ def _get_ad_backend(self): return ADBackend.BD_WINBIND @abc.abstractmethod - def _apply_to_cache(self): - pass + def _apply_to_cache(self): ... 
def apply(self): """Apply configuration""" @@ -224,12 +222,10 @@ def _is_responsible_for(self, subject_rec): return False @abc.abstractmethod - def _match_subject(self, subject_rec): - pass + def _match_subject(self, subject_rec): ... @abc.abstractmethod - def _add_subject(self, subject_rec): - pass + def _add_subject(self, subject_rec): ... def _install(self): if self._ad_enabled: From ed29ab93542ff268644d4f85f2c2e50cf1d7f6f1 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 184/341] plugins/extauth-hook-AD.py: Cleanup obsolete per-method pylint comments Signed-off-by: Bernhard Kaindl --- python3/plugins/extauth-hook-AD.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/python3/plugins/extauth-hook-AD.py b/python3/plugins/extauth-hook-AD.py index a67d35b10b8..7db600476fd 100755 --- a/python3/plugins/extauth-hook-AD.py +++ b/python3/plugins/extauth-hook-AD.py @@ -170,7 +170,6 @@ def _apply_to_cache(self): class DynamicPam(ADConfig): - #pylint: disable=too-few-public-methods """Base class to manage AD users and groups configure which permit pool admin ssh""" def __init__(self, path, session, args, ad_enabled=True): @@ -239,7 +238,6 @@ def _install(self): class UsersList(DynamicPam): - #pylint: disable=too-few-public-methods """Class manage users which permit pool admin ssh""" def __init__(self, session, arg, ad_enabled=True): @@ -282,7 +280,6 @@ def _add_subject(self, subject_rec): class GroupsList(DynamicPam): - #pylint: disable=too-few-public-methods """Class manage groups which permit pool admin ssh""" def __init__(self, session, arg, ad_enabled=True): @@ -313,7 +310,6 @@ class KeyValueConfig(ADConfig): _special_line_prefix = "__key_value_config_sp_line_prefix_" _empty_value = "" - #pylint: disable=too-many-arguments def __init__(self, path, session, args, ad_enabled=True, load_existing=True, file_mode=0o644, sep=": ", comment="#"): super(KeyValueConfig, self).__init__(path, session, From 
8cab68c2be6d42325b50243666720f1591e1df79 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 185/341] plugins/extauth-hook-AD.py: Modernise testee import to not need a symlink Signed-off-by: Bernhard Kaindl --- python3/plugins/extauth_hook_ad.py | 1 - python3/plugins/test_extauth_hook_AD.py | 22 +++++++++++++++------- 2 files changed, 15 insertions(+), 8 deletions(-) delete mode 120000 python3/plugins/extauth_hook_ad.py diff --git a/python3/plugins/extauth_hook_ad.py b/python3/plugins/extauth_hook_ad.py deleted file mode 120000 index 19afff4d393..00000000000 --- a/python3/plugins/extauth_hook_ad.py +++ /dev/null @@ -1 +0,0 @@ -extauth-hook-AD.py \ No newline at end of file diff --git a/python3/plugins/test_extauth_hook_AD.py b/python3/plugins/test_extauth_hook_AD.py index 616884101c6..8ae81bcbd19 100644 --- a/python3/plugins/test_extauth_hook_AD.py +++ b/python3/plugins/test_extauth_hook_AD.py @@ -8,13 +8,21 @@ from unittest import TestCase from unittest.mock import MagicMock, patch -# mock modules to avoid dependencies -sys.modules["XenAPIPlugin"] = MagicMock() -sys.modules["XenAPI"] = MagicMock() -# pylint: disable=wrong-import-position -# Import must after mock modules -from extauth_hook_ad import StaticSSHPam, NssConfig, SshdConfig, UsersList, GroupsList -from extauth_hook_ad import run_cmd +from python3.tests.import_helper import import_file_as_module, mocked_modules + + +with mocked_modules("XenAPIPlugin", "XenAPI"): + testee = import_file_as_module("python3/plugins/extauth-hook-AD.py") + # Will be replaced by updating the patch decorators + sys.modules["extauth_hook_ad"] = testee + # Will be replaced by updating the tests to call testee.function_name() + run_cmd = testee.run_cmd + NssConfig = testee.NssConfig + UsersList = testee.UsersList + GroupsList = testee.GroupsList + SshdConfig = testee.SshdConfig + StaticSSHPam = testee.StaticSSHPam + def test_run_cmd(caplog): """Assert the current buggy behavior of 
the run_cmd function after py3 migration""" From a0d3be8683f8e9c12561e936245dc8690b3d544d Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 186/341] plugins/test_extauth_hook_ad.py: Update the patch decorators to 'extauth-hook-AD' Signed-off-by: Bernhard Kaindl --- python3/plugins/test_extauth_hook_AD.py | 17 +++++++---------- python3/tests/import_helper.py | 2 +- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/python3/plugins/test_extauth_hook_AD.py b/python3/plugins/test_extauth_hook_AD.py index 8ae81bcbd19..3f0f22e40dd 100644 --- a/python3/plugins/test_extauth_hook_AD.py +++ b/python3/plugins/test_extauth_hook_AD.py @@ -4,7 +4,6 @@ import logging import os -import sys from unittest import TestCase from unittest.mock import MagicMock, patch @@ -13,8 +12,6 @@ with mocked_modules("XenAPIPlugin", "XenAPI"): testee = import_file_as_module("python3/plugins/extauth-hook-AD.py") - # Will be replaced by updating the patch decorators - sys.modules["extauth_hook_ad"] = testee # Will be replaced by updating the tests to call testee.function_name() run_cmd = testee.run_cmd NssConfig = testee.NssConfig @@ -132,9 +129,9 @@ def test_ad_enabled_with_pbis(self, mock_rename, mock_chmod): self.assertTrue(line_exists_in_config(static._lines, enabled_keyward)) -@patch("extauth_hook_ad.ADConfig._install") +@patch("extauth_hook_AD.ADConfig._install") class TestUsersList(TestCase): - @patch("extauth_hook_ad.open") + @patch("extauth_hook_AD.open") @patch("os.path.exists") @patch("os.remove") def test_ad_not_enabled(self, mock_remove, mock_exists, mock_open, mock_install): @@ -219,7 +216,7 @@ def test_failed_to_add_one_admin_should_not_affact_others(self, mock_install): self.assertNotIn(bad_user, dynamic._lines) -@patch("extauth_hook_ad.ADConfig._install") +@patch("extauth_hook_AD.ADConfig._install") class TestGroups(TestCase): def test_permit_admin_group(self, mock_install): # Domain group with admin role should be 
included in config file @@ -249,7 +246,7 @@ def test_permit_admin_group_with_space(self, mock_install): self.assertIn(permit_group, dynamic._lines) -@patch("extauth_hook_ad.ADConfig._install") +@patch("extauth_hook_AD.ADConfig._install") class TestNssConfig(TestCase): def test_ad_not_enabled(self, mock_install): expected_config = "passwd: files sss" @@ -264,9 +261,9 @@ def test_ad_enabled(self, mock_install): self.assertTrue(line_exists_in_config(nss._lines, expected_config)) -@patch("extauth_hook_ad.run_cmd") -@patch("extauth_hook_ad.ADConfig._install") -@patch("extauth_hook_ad.open") +@patch("extauth_hook_AD.run_cmd") +@patch("extauth_hook_AD.ADConfig._install") +@patch("extauth_hook_AD.open") class TestSshdConfig(TestCase): def test_ad_not_enabled(self, mock_open, mock_install, mock_run_cmd): expected_config = "ChallengeResponseAuthentication no" diff --git a/python3/tests/import_helper.py b/python3/tests/import_helper.py index 076a24913c7..2fdbd922b95 100644 --- a/python3/tests/import_helper.py +++ b/python3/tests/import_helper.py @@ -50,7 +50,7 @@ def import_file_as_module(relative_script_path): # type:(str) -> ModuleType - import_script_as_module('scripts/mail-alarm') # Returns the imported module. 
""" script_path = os.path.dirname(__file__) + "/../../" + relative_script_path - module_name = os.path.basename(script_path.replace(".py", "")) + module_name = os.path.basename(script_path).replace(".py", "").replace("-", "_") # For Python 3.11+: Import Python script without the .py extension: # https://gist.github.com/bernhardkaindl/1aaa04ea925fdc36c40d031491957fd3: From 3fe07e39d1e5a234cd30a030477627eaa3f5e6d5 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 187/341] storage-api: __init__.py: Use is_str() to check for string type Signed-off-by: Bernhard Kaindl --- ocaml/xapi-storage/python/xapi/__init__.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index 57a7c0c9f2d..fbef43ecde2 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -33,9 +33,23 @@ # pylint: disable=invalid-name,redefined-builtin,undefined-variable # pyright: reportUndefinedVariable=false + +# is_str(): Shortcut to check if a value is an instance of a string type. +# +# Replace: +# if not isinstance(code, str) and not isinstance(code, unicode): +# with: +# if not is_str(code): +# +# This makes for much cleaner code and suits Python3 well too. 
if sys.version_info[0] > 2: long = int - unicode = str + def is_str(x): + return isinstance(x, str) # With Python3, all strings are unicode +else: + def is_str(x): # pragma: no cover + return isinstance(x, (str, unicode)) # pylint: disable=undefined-variable + def success(result): return {"Status": "Success", "Value": result} @@ -72,7 +86,7 @@ class XenAPIException(Exception): def __init__(self, code, params): Exception.__init__(self) - if not isinstance(code, str) and not isinstance(code, unicode): + if not is_str(code): raise TypeError("string", repr(code)) if not isinstance(params, list): raise TypeError("list", repr(params)) From d034638da9fc9f6087afec69df5d67dee7d283c0 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 188/341] storage/api/{volume,plugin,datapath}.py: Apply isort, darker, import is_str Unused imports of is_long() are removed as well. Signed-off-by: Bernhard Kaindl --- .../python/xapi/storage/api/datapath.py | 20 +++++++++++++----- .../python/xapi/storage/api/plugin.py | 20 +++++++++++++----- .../python/xapi/storage/api/volume.py | 21 ++++++++++++++----- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py index 69b37e5a9e7..1bf426d5b2f 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py @@ -1,11 +1,21 @@ from __future__ import print_function -from xapi import success, Rpc_light_failure, InternalError, UnmarshalException, TypeError, is_long, UnknownMethod -import xapi -import sys -import json + import argparse -import traceback +import json import logging +import sys +import traceback + +import xapi +from xapi import ( + InternalError, + Rpc_light_failure, + TypeError, + UnknownMethod, + UnmarshalException, + is_str, + success, +) # pylint: disable=invalid-name,redefined-builtin,undefined-variable # 
pyright: reportUndefinedVariable=false diff --git a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py index 1b6d37214ca..8e50736c72e 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py @@ -1,11 +1,21 @@ from __future__ import print_function -from xapi import success, Rpc_light_failure, InternalError, UnmarshalException, TypeError, is_long, UnknownMethod -import xapi -import sys -import json + import argparse -import traceback +import json import logging +import sys +import traceback + +import xapi +from xapi import ( + InternalError, + Rpc_light_failure, + TypeError, + UnknownMethod, + UnmarshalException, + is_str, + success, +) # pylint: disable=invalid-name,redefined-builtin,undefined-variable # pyright: reportUndefinedVariable=false diff --git a/ocaml/xapi-storage/python/xapi/storage/api/volume.py b/ocaml/xapi-storage/python/xapi/storage/api/volume.py index b89574f9570..5beb31b57cb 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/volume.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/volume.py @@ -1,11 +1,22 @@ from __future__ import print_function -from xapi import success, Rpc_light_failure, InternalError, UnmarshalException, TypeError, is_long, UnknownMethod -import xapi -import sys -import json + import argparse -import traceback +import json import logging +import sys +import traceback + +import xapi +from xapi import ( + InternalError, + Rpc_light_failure, + TypeError, + UnknownMethod, + UnmarshalException, + is_long, + is_str, + success, +) # pylint: disable=invalid-name,redefined-builtin,undefined-variable # pyright: reportUndefinedVariable=false From 3e636027b270faf0b464542c1a569931097e769a Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 189/341] storage/api/{volume,plugin,datapath}.py: Disable excessive pylint warnings Signed-off-by: Bernhard Kaindl --- 
ocaml/xapi-storage/python/xapi/__init__.py | 4 +--- ocaml/xapi-storage/python/xapi/storage/api/datapath.py | 2 ++ ocaml/xapi-storage/python/xapi/storage/api/plugin.py | 2 ++ ocaml/xapi-storage/python/xapi/storage/api/volume.py | 2 ++ pyproject.toml | 2 ++ 5 files changed, 9 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index fbef43ecde2..50eae33fe1a 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -31,8 +31,6 @@ import json import argparse -# pylint: disable=invalid-name,redefined-builtin,undefined-variable -# pyright: reportUndefinedVariable=false # is_str(): Shortcut to check if a value is an instance of a string type. # @@ -138,7 +136,7 @@ def __init__(self, thing, ty, desc): "UnmarshalException thing=%s ty=%s desc=%s" % (thing, ty, desc)) -class TypeError(InternalError): +class TypeError(InternalError): # pylint: disable=redefined-builtin def __init__(self, expected, actual): InternalError.__init__( diff --git a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py index 1bf426d5b2f..1305f31cc9d 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py @@ -7,6 +7,8 @@ import traceback import xapi +# pylint: disable=line-too-long,superfluous-parens,unused-argument +# pylint: disable-next=redefined-builtin # FIXME: TypeError is a custom class in xapi from xapi import ( InternalError, Rpc_light_failure, diff --git a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py index 8e50736c72e..69dce60fc77 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py @@ -7,6 +7,8 @@ import traceback import xapi +# pylint: disable=line-too-long,superfluous-parens,unused-argument +# pylint: 
disable-next=redefined-builtin # FIXME: TypeError is a custom class in xapi from xapi import ( InternalError, Rpc_light_failure, diff --git a/ocaml/xapi-storage/python/xapi/storage/api/volume.py b/ocaml/xapi-storage/python/xapi/storage/api/volume.py index 5beb31b57cb..d530a3a3c8c 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/volume.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/volume.py @@ -7,6 +7,8 @@ import traceback import xapi +# pylint: disable=line-too-long,superfluous-parens,unused-argument +# pylint: disable-next=redefined-builtin # FIXME: TypeError is a custom class in xapi from xapi import ( InternalError, Rpc_light_failure, diff --git a/pyproject.toml b/pyproject.toml index efdcd13494e..b8e4c984853 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -195,8 +195,10 @@ disable = [ "no-else-break", # else clause following a break statement "protected-access", # Best done during the code cleanup phase "super-with-arguments", # Consider using Python 3 style super(no args) calls + "too-few-public-methods", # Some classes only overload private methods, is fine "too-many-branches", # Existing code breaches this, not part of porting "too-many-arguments", # Likewise, not part of porting + "too-many-lines", # Likewise, not part of porting "too-many-locals", # Likewise, not part of porting "too-many-statements", # Likewise, not part of porting "unnecessary-pass", # Cosmetic, best done during the code cleanup phase From f74d7c1a3968ce5207dde3387e522001293aa1c7 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 190/341] storage/api/{volume,plugin,datapath}.py: Apply automatic conversion to is_str() Signed-off-by: Bernhard Kaindl --- .../python/xapi/storage/api/datapath.py | 42 ++-- .../python/xapi/storage/api/plugin.py | 34 +-- .../python/xapi/storage/api/volume.py | 228 +++++++++--------- 3 files changed, 152 insertions(+), 152 deletions(-) diff --git 
a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py index 1305f31cc9d..0a4e82438fb 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py @@ -27,7 +27,7 @@ class Unimplemented(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): + if not is_str(arg_0): raise TypeError("string", repr(arg_0)) self.arg_0 = arg_0 class Datapath_server_dispatcher: @@ -42,12 +42,12 @@ def open(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) if not('persistent' in args): raise UnmarshalException('argument missing', 'persistent', '') @@ -63,29 +63,29 @@ def attach(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) if not('domain' in args): raise UnmarshalException('argument missing', 'domain', '') domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): + if not is_str(domain): raise TypeError("string", repr(domain)) results = self._impl.attach(dbg, uri, domain) - if not isinstance(results['domain_uuid'], 
str) and not isinstance(results['domain_uuid'], unicode): + if not is_str(results['domain_uuid']): raise TypeError("string", repr(results['domain_uuid'])) if results['implementation'][0] == 'Blkback': - if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode): + if not is_str(results['implementation'][1]): raise TypeError("string", repr(results['implementation'][1])) elif results['implementation'][0] == 'Tapdisk3': - if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode): + if not is_str(results['implementation'][1]): raise TypeError("string", repr(results['implementation'][1])) elif results['implementation'][0] == 'Qdisk': - if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode): + if not is_str(results['implementation'][1]): raise TypeError("string", repr(results['implementation'][1])) return results def activate(self, args): @@ -95,17 +95,17 @@ def activate(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) if not('domain' in args): raise UnmarshalException('argument missing', 'domain', '') domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): + if not is_str(domain): raise TypeError("string", repr(domain)) results = self._impl.activate(dbg, uri, domain) return results @@ -116,17 +116,17 @@ def deactivate(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, 
unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) if not('domain' in args): raise UnmarshalException('argument missing', 'domain', '') domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): + if not is_str(domain): raise TypeError("string", repr(domain)) results = self._impl.deactivate(dbg, uri, domain) return results @@ -137,17 +137,17 @@ def detach(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) if not('domain' in args): raise UnmarshalException('argument missing', 'domain', '') domain = args["domain"] - if not isinstance(domain, str) and not isinstance(domain, unicode): + if not is_str(domain): raise TypeError("string", repr(domain)) results = self._impl.detach(dbg, uri, domain) return results @@ -158,12 +158,12 @@ def close(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) results = self._impl.close(dbg, uri) return results diff --git a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py 
b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py index 69dce60fc77..d9199a98771 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py @@ -27,7 +27,7 @@ class Unimplemented(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): + if not is_str(arg_0): raise TypeError("string", repr(arg_0)) self.arg_0 = arg_0 class Plugin_server_dispatcher: @@ -42,40 +42,40 @@ def query(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) results = self._impl.query(dbg) - if not isinstance(results['plugin'], str) and not isinstance(results['plugin'], unicode): + if not is_str(results['plugin']): raise TypeError("string", repr(results['plugin'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): + if not is_str(results['name']): raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): + if not is_str(results['description']): raise TypeError("string", repr(results['description'])) - if not isinstance(results['vendor'], str) and not isinstance(results['vendor'], unicode): + if not is_str(results['vendor']): raise TypeError("string", repr(results['vendor'])) - if not isinstance(results['copyright'], str) and not isinstance(results['copyright'], unicode): + if not is_str(results['copyright']): raise TypeError("string", repr(results['copyright'])) - if not isinstance(results['version'], str) and not isinstance(results['version'], unicode): + if not is_str(results['version']): raise TypeError("string", repr(results['version'])) - if not isinstance(results['required_api_version'], str) and 
not isinstance(results['required_api_version'], unicode): + if not is_str(results['required_api_version']): raise TypeError("string", repr(results['required_api_version'])) if not isinstance(results['features'], list): raise TypeError("string list", repr(results['features'])) for tmp_1 in results['features']: - if not isinstance(tmp_1, str) and not isinstance(tmp_1, unicode): + if not is_str(tmp_1): raise TypeError("string", repr(tmp_1)) if not isinstance(results['configuration'], dict): raise TypeError("(string * string) list", repr(results['configuration'])) for tmp_2 in results['configuration'].keys(): - if not isinstance(tmp_2, str) and not isinstance(tmp_2, unicode): + if not is_str(tmp_2): raise TypeError("string", repr(tmp_2)) for tmp_2 in results['configuration'].values(): - if not isinstance(tmp_2, str) and not isinstance(tmp_2, unicode): + if not is_str(tmp_2): raise TypeError("string", repr(tmp_2)) if not isinstance(results['required_cluster_stack'], list): raise TypeError("string list", repr(results['required_cluster_stack'])) for tmp_3 in results['required_cluster_stack']: - if not isinstance(tmp_3, str) and not isinstance(tmp_3, unicode): + if not is_str(tmp_3): raise TypeError("string", repr(tmp_3)) return results def ls(self, args): @@ -85,13 +85,13 @@ def ls(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) results = self._impl.ls(dbg) if not isinstance(results, list): raise TypeError("string list", repr(results)) for tmp_4 in results: - if not isinstance(tmp_4, str) and not isinstance(tmp_4, unicode): + if not is_str(tmp_4): raise TypeError("string", repr(tmp_4)) return results def diagnostics(self, args): @@ -101,10 +101,10 @@ def diagnostics(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not 
isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) results = self._impl.diagnostics(dbg) - if not isinstance(results, str) and not isinstance(results, unicode): + if not is_str(results): raise TypeError("string", repr(results)) return results def _dispatch(self, method, params): diff --git a/ocaml/xapi-storage/python/xapi/storage/api/volume.py b/ocaml/xapi-storage/python/xapi/storage/api/volume.py index d530a3a3c8c..0f01ed6fd97 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/volume.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/volume.py @@ -30,31 +30,31 @@ class Sr_not_attached(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Sr_not_attached", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): + if not is_str(arg_0): raise TypeError("string", repr(arg_0)) self.arg_0 = arg_0 class SR_does_not_exist(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "SR_does_not_exist", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): + if not is_str(arg_0): raise TypeError("string", repr(arg_0)) self.arg_0 = arg_0 class Volume_does_not_exist(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Volume_does_not_exist", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): + if not is_str(arg_0): raise TypeError("string", repr(arg_0)) self.arg_0 = arg_0 class Unimplemented(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): + if not is_str(arg_0): raise TypeError("string", repr(arg_0)) self.arg_0 = arg_0 class Cancelled(Rpc_light_failure): def __init__(self, arg_0): Rpc_light_failure.__init__(self, "Cancelled", [ arg_0 ]) - if not isinstance(arg_0, str) and not isinstance(arg_0, unicode): + if not is_str(arg_0): raise 
TypeError("string", repr(arg_0)) self.arg_0 = arg_0 class Volume_server_dispatcher: @@ -69,22 +69,22 @@ def create(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('name' in args): raise UnmarshalException('argument missing', 'name', '') name = args["name"] - if not isinstance(name, str) and not isinstance(name, unicode): + if not is_str(name): raise TypeError("string", repr(name)) if not('description' in args): raise UnmarshalException('argument missing', 'description', '') description = args["description"] - if not isinstance(description, str) and not isinstance(description, unicode): + if not is_str(description): raise TypeError("string", repr(description)) if not('size' in args): raise UnmarshalException('argument missing', 'size', '') @@ -92,14 +92,14 @@ def create(self, args): if not(is_long(size)): raise TypeError("int64", repr(size)) results = self._impl.create(dbg, sr, name, description, size) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): + if not is_str(results['key']): raise TypeError("string", repr(results['key'])) if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], unicode): + if not is_str(results['uuid']): raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): + if not is_str(results['name']): raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): + if not 
is_str(results['description']): raise TypeError("string", repr(results['description'])) if not isinstance(results['read_write'], bool): raise TypeError("bool", repr(results['read_write'])) @@ -110,15 +110,15 @@ def create(self, args): if not isinstance(results['uri'], list): raise TypeError("string list", repr(results['uri'])) for tmp_5 in results['uri']: - if not isinstance(tmp_5, str) and not isinstance(tmp_5, unicode): + if not is_str(tmp_5): raise TypeError("string", repr(tmp_5)) if not isinstance(results['keys'], dict): raise TypeError("(string * string) list", repr(results['keys'])) for tmp_6 in results['keys'].keys(): - if not isinstance(tmp_6, str) and not isinstance(tmp_6, unicode): + if not is_str(tmp_6): raise TypeError("string", repr(tmp_6)) for tmp_6 in results['keys'].values(): - if not isinstance(tmp_6, str) and not isinstance(tmp_6, unicode): + if not is_str(tmp_6): raise TypeError("string", repr(tmp_6)) return results def snapshot(self, args): @@ -128,27 +128,27 @@ def snapshot(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) results = self._impl.snapshot(dbg, sr, key) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): + if not is_str(results['key']): raise TypeError("string", repr(results['key'])) if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], 
unicode): + if not is_str(results['uuid']): raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): + if not is_str(results['name']): raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): + if not is_str(results['description']): raise TypeError("string", repr(results['description'])) if not isinstance(results['read_write'], bool): raise TypeError("bool", repr(results['read_write'])) @@ -159,15 +159,15 @@ def snapshot(self, args): if not isinstance(results['uri'], list): raise TypeError("string list", repr(results['uri'])) for tmp_7 in results['uri']: - if not isinstance(tmp_7, str) and not isinstance(tmp_7, unicode): + if not is_str(tmp_7): raise TypeError("string", repr(tmp_7)) if not isinstance(results['keys'], dict): raise TypeError("(string * string) list", repr(results['keys'])) for tmp_8 in results['keys'].keys(): - if not isinstance(tmp_8, str) and not isinstance(tmp_8, unicode): + if not is_str(tmp_8): raise TypeError("string", repr(tmp_8)) for tmp_8 in results['keys'].values(): - if not isinstance(tmp_8, str) and not isinstance(tmp_8, unicode): + if not is_str(tmp_8): raise TypeError("string", repr(tmp_8)) return results def clone(self, args): @@ -177,27 +177,27 @@ def clone(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not 
is_str(key): raise TypeError("string", repr(key)) results = self._impl.clone(dbg, sr, key) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): + if not is_str(results['key']): raise TypeError("string", repr(results['key'])) if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], unicode): + if not is_str(results['uuid']): raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): + if not is_str(results['name']): raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): + if not is_str(results['description']): raise TypeError("string", repr(results['description'])) if not isinstance(results['read_write'], bool): raise TypeError("bool", repr(results['read_write'])) @@ -208,15 +208,15 @@ def clone(self, args): if not isinstance(results['uri'], list): raise TypeError("string list", repr(results['uri'])) for tmp_9 in results['uri']: - if not isinstance(tmp_9, str) and not isinstance(tmp_9, unicode): + if not is_str(tmp_9): raise TypeError("string", repr(tmp_9)) if not isinstance(results['keys'], dict): raise TypeError("(string * string) list", repr(results['keys'])) for tmp_10 in results['keys'].keys(): - if not isinstance(tmp_10, str) and not isinstance(tmp_10, unicode): + if not is_str(tmp_10): raise TypeError("string", repr(tmp_10)) for tmp_10 in results['keys'].values(): - if not isinstance(tmp_10, str) and not isinstance(tmp_10, unicode): + if not is_str(tmp_10): raise TypeError("string", repr(tmp_10)) return results def destroy(self, args): @@ -226,17 +226,17 @@ def destroy(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if 
not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) results = self._impl.destroy(dbg, sr, key) return results @@ -247,22 +247,22 @@ def set_name(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) if not('new_name' in args): raise UnmarshalException('argument missing', 'new_name', '') new_name = args["new_name"] - if not isinstance(new_name, str) and not isinstance(new_name, unicode): + if not is_str(new_name): raise TypeError("string", repr(new_name)) results = self._impl.set_name(dbg, sr, key, new_name) return results @@ -273,22 +273,22 @@ def set_description(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", 
repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) if not('new_description' in args): raise UnmarshalException('argument missing', 'new_description', '') new_description = args["new_description"] - if not isinstance(new_description, str) and not isinstance(new_description, unicode): + if not is_str(new_description): raise TypeError("string", repr(new_description)) results = self._impl.set_description(dbg, sr, key, new_description) return results @@ -299,27 +299,27 @@ def set(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) if not('k' in args): raise UnmarshalException('argument missing', 'k', '') k = args["k"] - if not isinstance(k, str) and not isinstance(k, unicode): + if not is_str(k): raise TypeError("string", repr(k)) if not('v' in args): raise UnmarshalException('argument missing', 'v', '') v = args["v"] - if not isinstance(v, str) and not isinstance(v, unicode): + if not is_str(v): raise TypeError("string", repr(v)) results = self._impl.set(dbg, sr, key, k, v) return results @@ -330,22 +330,22 @@ def unset(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + 
if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) if not('k' in args): raise UnmarshalException('argument missing', 'k', '') k = args["k"] - if not isinstance(k, str) and not isinstance(k, unicode): + if not is_str(k): raise TypeError("string", repr(k)) results = self._impl.unset(dbg, sr, key, k) return results @@ -356,17 +356,17 @@ def resize(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) if not('new_size' in args): raise UnmarshalException('argument missing', 'new_size', '') @@ -382,27 +382,27 @@ def stat(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise 
TypeError("string", repr(sr)) if not('key' in args): raise UnmarshalException('argument missing', 'key', '') key = args["key"] - if not isinstance(key, str) and not isinstance(key, unicode): + if not is_str(key): raise TypeError("string", repr(key)) results = self._impl.stat(dbg, sr, key) - if not isinstance(results['key'], str) and not isinstance(results['key'], unicode): + if not is_str(results['key']): raise TypeError("string", repr(results['key'])) if results['uuid'] is not None: - if not isinstance(results['uuid'], str) and not isinstance(results['uuid'], unicode): + if not is_str(results['uuid']): raise TypeError("string", repr(results['uuid'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): + if not is_str(results['name']): raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): + if not is_str(results['description']): raise TypeError("string", repr(results['description'])) if not isinstance(results['read_write'], bool): raise TypeError("bool", repr(results['read_write'])) @@ -413,15 +413,15 @@ def stat(self, args): if not isinstance(results['uri'], list): raise TypeError("string list", repr(results['uri'])) for tmp_11 in results['uri']: - if not isinstance(tmp_11, str) and not isinstance(tmp_11, unicode): + if not is_str(tmp_11): raise TypeError("string", repr(tmp_11)) if not isinstance(results['keys'], dict): raise TypeError("(string * string) list", repr(results['keys'])) for tmp_12 in results['keys'].keys(): - if not isinstance(tmp_12, str) and not isinstance(tmp_12, unicode): + if not is_str(tmp_12): raise TypeError("string", repr(tmp_12)) for tmp_12 in results['keys'].values(): - if not isinstance(tmp_12, str) and not isinstance(tmp_12, unicode): + if not is_str(tmp_12): raise TypeError("string", repr(tmp_12)) return results def _dispatch(self, method, params): @@ -814,22 +814,22 @@ def probe(self, args): if not('dbg' 
in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) results = self._impl.probe(dbg, uri) if not isinstance(results['srs'], list): raise TypeError("7 list", repr(results['srs'])) for tmp_13 in results['srs']: - if not isinstance(tmp_13['sr'], str) and not isinstance(tmp_13['sr'], unicode): + if not is_str(tmp_13['sr']): raise TypeError("string", repr(tmp_13['sr'])) - if not isinstance(tmp_13['name'], str) and not isinstance(tmp_13['name'], unicode): + if not is_str(tmp_13['name']): raise TypeError("string", repr(tmp_13['name'])) - if not isinstance(tmp_13['description'], str) and not isinstance(tmp_13['description'], unicode): + if not is_str(tmp_13['description']): raise TypeError("string", repr(tmp_13['description'])) if not(is_long(tmp_13['free_space'])): raise TypeError("int64", repr(tmp_13['free_space'])) @@ -838,20 +838,20 @@ def probe(self, args): if not isinstance(tmp_13['datasources'], list): raise TypeError("string list", repr(tmp_13['datasources'])) for tmp_14 in tmp_13['datasources']: - if not isinstance(tmp_14, str) and not isinstance(tmp_14, unicode): + if not is_str(tmp_14): raise TypeError("string", repr(tmp_14)) if not isinstance(tmp_13['clustered'], bool): raise TypeError("bool", repr(tmp_13['clustered'])) if tmp_13['health'][0] == 'Healthy': - if not isinstance(tmp_13['health'][1], str) and not isinstance(tmp_13['health'][1], unicode): + if not is_str(tmp_13['health'][1]): raise TypeError("string", repr(tmp_13['health'][1])) elif tmp_13['health'][0] == 'Recovering': - if not isinstance(tmp_13['health'][1], str) and not isinstance(tmp_13['health'][1], unicode): + if not 
is_str(tmp_13['health'][1]): raise TypeError("string", repr(tmp_13['health'][1])) if not isinstance(results['uris'], list): raise TypeError("string list", repr(results['uris'])) for tmp_15 in results['uris']: - if not isinstance(tmp_15, str) and not isinstance(tmp_15, unicode): + if not is_str(tmp_15): raise TypeError("string", repr(tmp_15)) return results def create(self, args): @@ -861,22 +861,22 @@ def create(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) if not('name' in args): raise UnmarshalException('argument missing', 'name', '') name = args["name"] - if not isinstance(name, str) and not isinstance(name, unicode): + if not is_str(name): raise TypeError("string", repr(name)) if not('description' in args): raise UnmarshalException('argument missing', 'description', '') description = args["description"] - if not isinstance(description, str) and not isinstance(description, unicode): + if not is_str(description): raise TypeError("string", repr(description)) if not('configuration' in args): raise UnmarshalException('argument missing', 'configuration', '') @@ -884,10 +884,10 @@ def create(self, args): if not isinstance(configuration, dict): raise TypeError("(string * string) list", repr(configuration)) for tmp_16 in configuration.keys(): - if not isinstance(tmp_16, str) and not isinstance(tmp_16, unicode): + if not is_str(tmp_16): raise TypeError("string", repr(tmp_16)) for tmp_16 in configuration.values(): - if not isinstance(tmp_16, str) and not isinstance(tmp_16, unicode): + if not is_str(tmp_16): raise TypeError("string", repr(tmp_16)) results = 
self._impl.create(dbg, uri, name, description, configuration) return results @@ -898,15 +898,15 @@ def attach(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('uri' in args): raise UnmarshalException('argument missing', 'uri', '') uri = args["uri"] - if not isinstance(uri, str) and not isinstance(uri, unicode): + if not is_str(uri): raise TypeError("string", repr(uri)) results = self._impl.attach(dbg, uri) - if not isinstance(results, str) and not isinstance(results, unicode): + if not is_str(results): raise TypeError("string", repr(results)) return results def detach(self, args): @@ -916,12 +916,12 @@ def detach(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) results = self._impl.detach(dbg, sr) return results @@ -932,12 +932,12 @@ def destroy(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) results = self._impl.destroy(dbg, sr) return results @@ -948,19 +948,19 @@ def stat(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not 
isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) results = self._impl.stat(dbg, sr) - if not isinstance(results['sr'], str) and not isinstance(results['sr'], unicode): + if not is_str(results['sr']): raise TypeError("string", repr(results['sr'])) - if not isinstance(results['name'], str) and not isinstance(results['name'], unicode): + if not is_str(results['name']): raise TypeError("string", repr(results['name'])) - if not isinstance(results['description'], str) and not isinstance(results['description'], unicode): + if not is_str(results['description']): raise TypeError("string", repr(results['description'])) if not(is_long(results['free_space'])): raise TypeError("int64", repr(results['free_space'])) @@ -969,15 +969,15 @@ def stat(self, args): if not isinstance(results['datasources'], list): raise TypeError("string list", repr(results['datasources'])) for tmp_17 in results['datasources']: - if not isinstance(tmp_17, str) and not isinstance(tmp_17, unicode): + if not is_str(tmp_17): raise TypeError("string", repr(tmp_17)) if not isinstance(results['clustered'], bool): raise TypeError("bool", repr(results['clustered'])) if results['health'][0] == 'Healthy': - if not isinstance(results['health'][1], str) and not isinstance(results['health'][1], unicode): + if not is_str(results['health'][1]): raise TypeError("string", repr(results['health'][1])) elif results['health'][0] == 'Recovering': - if not isinstance(results['health'][1], str) and not isinstance(results['health'][1], unicode): + if not is_str(results['health'][1]): raise TypeError("string", repr(results['health'][1])) return results def set_name(self, args): @@ -987,17 +987,17 @@ def set_name(self, args): if not('dbg' in args): raise 
UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('new_name' in args): raise UnmarshalException('argument missing', 'new_name', '') new_name = args["new_name"] - if not isinstance(new_name, str) and not isinstance(new_name, unicode): + if not is_str(new_name): raise TypeError("string", repr(new_name)) results = self._impl.set_name(dbg, sr, new_name) return results @@ -1008,17 +1008,17 @@ def set_description(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) if not('new_description' in args): raise UnmarshalException('argument missing', 'new_description', '') new_description = args["new_description"] - if not isinstance(new_description, str) and not isinstance(new_description, unicode): + if not is_str(new_description): raise TypeError("string", repr(new_description)) results = self._impl.set_description(dbg, sr, new_description) return results @@ -1029,25 +1029,25 @@ def ls(self, args): if not('dbg' in args): raise UnmarshalException('argument missing', 'dbg', '') dbg = args["dbg"] - if not isinstance(dbg, str) and not isinstance(dbg, unicode): + if not is_str(dbg): raise TypeError("string", repr(dbg)) if not('sr' in args): raise UnmarshalException('argument missing', 'sr', '') sr = args["sr"] - if not isinstance(sr, str) and 
not isinstance(sr, unicode): + if not is_str(sr): raise TypeError("string", repr(sr)) results = self._impl.ls(dbg, sr) if not isinstance(results, list): raise TypeError("8 list", repr(results)) for tmp_18 in results: - if not isinstance(tmp_18['key'], str) and not isinstance(tmp_18['key'], unicode): + if not is_str(tmp_18['key']): raise TypeError("string", repr(tmp_18['key'])) if tmp_18['uuid'] is not None: - if not isinstance(tmp_18['uuid'], str) and not isinstance(tmp_18['uuid'], unicode): + if not is_str(tmp_18['uuid']): raise TypeError("string", repr(tmp_18['uuid'])) - if not isinstance(tmp_18['name'], str) and not isinstance(tmp_18['name'], unicode): + if not is_str(tmp_18['name']): raise TypeError("string", repr(tmp_18['name'])) - if not isinstance(tmp_18['description'], str) and not isinstance(tmp_18['description'], unicode): + if not is_str(tmp_18['description']): raise TypeError("string", repr(tmp_18['description'])) if not isinstance(tmp_18['read_write'], bool): raise TypeError("bool", repr(tmp_18['read_write'])) @@ -1058,15 +1058,15 @@ def ls(self, args): if not isinstance(tmp_18['uri'], list): raise TypeError("string list", repr(tmp_18['uri'])) for tmp_19 in tmp_18['uri']: - if not isinstance(tmp_19, str) and not isinstance(tmp_19, unicode): + if not is_str(tmp_19): raise TypeError("string", repr(tmp_19)) if not isinstance(tmp_18['keys'], dict): raise TypeError("(string * string) list", repr(tmp_18['keys'])) for tmp_20 in tmp_18['keys'].keys(): - if not isinstance(tmp_20, str) and not isinstance(tmp_20, unicode): + if not is_str(tmp_20): raise TypeError("string", repr(tmp_20)) for tmp_20 in tmp_18['keys'].values(): - if not isinstance(tmp_20, str) and not isinstance(tmp_20, unicode): + if not is_str(tmp_20): raise TypeError("string", repr(tmp_20)) return results def _dispatch(self, method, params): From d83218f61810f5d9adc9ddb416de23dd51d59a6a Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 191/341] 
storage-api: Add pytest for datapath.Datapath_server_dispatcher() Signed-off-by: Bernhard Kaindl --- .github/workflows/other.yml | 7 +- .../python/xapi/storage/api/test_datapath.py | 127 ++++++++++++++++++ pyproject.toml | 2 +- 3 files changed, 130 insertions(+), 6 deletions(-) create mode 100644 ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 17f91991da5..7cf672afd14 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -49,13 +49,10 @@ jobs: if: ${{ matrix.python-version == '2.7' }} run: > pip install enum future mock pytest-coverage pytest-mock && - pytest - --cov=scripts --cov=ocaml/xcp-rrdd - scripts/ ocaml/xcp-rrdd -vv -rA - --junitxml=.git/pytest${{matrix.python-version}}.xml + pytest -vv -rA --cov=ocaml ocaml --cov-report term-missing --cov-report xml:.git/coverage${{matrix.python-version}}.xml - --cov-fail-under 0 + --cov-fail-under 60 env: PYTHONDEVMODE: yes diff --git a/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py new file mode 100644 index 00000000000..9bea7377391 --- /dev/null +++ b/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py @@ -0,0 +1,127 @@ +import logging + +import pytest + +import xapi +import xapi.storage.api.datapath + + +def internal_error(error): + """Return a dictionary with an internal error""" + return {"ErrorDescription": ["Internal_error", error], "Status": "Failure"} + + +def assert_error(testee, caplog, method_args, method, error): + """Assert that the result of the testee matches the expected error result""" + args = method_args.copy() + if method != "open": # the persistent arg is only checked for the open method + args["persistent"] = None # pass it, but with a wrong type(not used/checked) + assert testee._dispatch("Datapath." 
+ method, [args]) == internal_error(error) + assert caplog.messages[0] == "caught " + error + caplog.clear() + + +def assert_type_checks(testee, methods, template_args, bad_args, caplog): + """Assert that the result of the testee matches the expected result""" + for arg in bad_args: + # Sigh, if Python would be strongly typed, we wouldn't need this: + # Assert the type checks of the arguments + expected = "bool" if arg == "persistent" else "string" + other_type = False if expected == "string" else "str" + for actual in [None, [], (), {"dict": "val"}, 1, 1.0, str, caplog, other_type]: + bad_args = template_args.copy() + bad_args[arg] = actual + error_msg = "TypeError expected={} actual={}".format(expected, repr(actual)) + for method in methods: + assert_error(testee, caplog, bad_args, method, error_msg) + + # Remove the argument and assert the missing argument checks + bad_args.pop(arg) + error_msg = "UnmarshalException thing=argument missing ty={} desc=".format(arg) + for method in methods: + assert_error(testee, caplog, bad_args, method, error_msg) + + +def test_dispatcher(caplog, capsys): + """ + Test the dispatcher of the Xapi storage API datapath interface + + The dispatcher is a class that routes the calls to the corresponding methods + of a given Datapath implementation class. 
+ """ + # Setup + caplog.set_level(logging.INFO) + + # The testee passes them to the Datapath_test class and its attach method + # is expected to return the values which we use to test the dispatcher: + args = {"dbg": "", "uri": "uri", "domain": "uuid", "persistent": True} + + # Call + + # datapath_server_test() returns an instance of the dispatcher class that + # routes the calls to the corresponding methods of the Datapath_test class: + testee = xapi.storage.api.datapath.datapath_server_test() + + # Test the argument checks of the dispatcher to identify missing arguments: + + # Assert type checks on the dbg and uri arguments + missing = ["dbg", "uri"] + methods = ["attach", "activate", "deactivate", "detach", "open", "close"] + assert_type_checks(testee, methods, args, missing, caplog) + + # Assert type checks on the missing domain argument + missing = ["domain"] + methods = ["attach", "activate", "deactivate", "detach"] + assert_type_checks(testee, methods, args, missing, caplog) + + # Assert type checks on the persistent flag for the open method + missing = ["persistent"] + methods = ["open"] + assert_type_checks(testee, methods, args, missing, caplog) + + # BUG: Datapath_test.attach() currently returns an mismatching dictionary: + # The dispatcher expects a dict with a "domain_uuid" key, but the implementation + # Datapath_test.attach() returns a dict with a "backend" key instead. + # Changing the implementation of Datapath_test.attach() will fix this issue. 
+ + # This WOULD be an example expected result, BUT the implementation of + # Datapath_test.attach() returns an invalid dictionary to the dispatcher: + assert testee._dispatch("Datapath.attach", [args]) != { + "Status": "Success", + "Value": {"domain_uuid": "uuid", "implementation": ("uri", "dbg")}, + } + + # BUG: This is the internal error that Datapath_test.attach() currently triggers: + assert testee._dispatch("Datapath.attach", [args]) == { + "ErrorDescription": ["Internal_error", "'domain_uuid'"], + "Status": "Failure", + } + assert caplog.messages[0] == "caught 'domain_uuid'" + caplog.clear() + + # The other methods work as expected. Setup, Call, Assert: + success = {"Status": "Success", "Value": {}} + assert testee._dispatch("Datapath.open", [args]) == success + assert testee._dispatch("Datapath.activate", [args]) == success + assert testee._dispatch("Datapath.deactivate", [args]) == success + assert testee._dispatch("Datapath.detach", [args]) == success + assert testee._dispatch("Datapath.close", [args]) == success + + # Assert that no errors were logged and no output was printed: + assert caplog.messages == [] # No messages were logged + assert capsys.readouterr().out == "" # No output was printed + assert capsys.readouterr().err == "" # No errors were printed + + +def test_exceptions(): + """Cover the code changed by using the is_str() function""" + + with pytest.raises(xapi.TypeError) as exc_info: + _ = xapi.XenAPIException(1, "params") # pylint: disable=pointless-statement + assert str(exc_info.value) == "TypeError expected=string actual=1" + + with pytest.raises(xapi.TypeError) as exc_info: + _ = xapi.storage.api.datapath.Unimplemented( + False + ) # pylint: disable=pointless-statement + assert str(exc_info.value) == "TypeError expected=string actual=False" diff --git a/pyproject.toml b/pyproject.toml index b8e4c984853..8f1b5931255 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -260,7 +260,7 @@ addopts = "-v -ra" # xfail_strict: require to remove 
pytext.xfail marker when test is fixed # required_plugins: require that these plugins are installed before testing # ----------------------------------------------------------------------------- -testpaths = ["python3", "scripts", "ocaml/xcp-rrdd"] +testpaths = ["python3", "ocaml/xcp-rrdd", "ocaml/xapi-storage"] required_plugins = ["pytest-cov", "pytest-mock"] log_cli_level = "INFO" log_cli = true From c23f314c039404092774b727664d5937472355ae Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 192/341] storage-api: Use Datapath_test.attach to test Datapath_server_dispatcher.attach Signed-off-by: Bernhard Kaindl --- .../python/xapi/storage/api/datapath.py | 29 ++++++++++-- .../python/xapi/storage/api/test_datapath.py | 47 ++++++++++++------- 2 files changed, 54 insertions(+), 22 deletions(-) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py index 0a4e82438fb..957b8c0362f 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py @@ -205,7 +205,11 @@ def close(self, dbg, uri): """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" raise Unimplemented("Datapath.close") class Datapath_test: - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" + """ + Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. + Every function is idempotent. Every function takes a domain parameter which allows + the implementation to track how many domains are currently using the volume. 
+ """ def __init__(self): pass def open(self, dbg, uri, persistent): @@ -213,10 +217,27 @@ def open(self, dbg, uri, persistent): result = {} return result def attach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - result["backend"] = { "domain_uuid": "string", "implementation": None } + # type:(str, str, str) -> dict[str, tuple[str, Any] | str] + """ + Return a valid results dictionary to Datapath_server_dispatcher.attach() + + The returned dict must contain the "domain_uuid" key with a string value. + The returned dict must contain the "implementation" key with two elements: + If the first element is one of "Blkback", "Tapdisk3" or "Qdisk", + the second element must be a string. Else, the dispatcher returns an error. + + See Datapath_server_dispatcher.attach() for the implementation details. + """ + # Fixed to not raise an internal error in Datapath_server_dispatcher.attach(): + result = { "domain_uuid": domain, "implementation": (uri, dbg) } + if not domain: # Provoke an internal error in the dispatcher to cover its code + result.pop("domain_uuid") # by removing the required "domain_uuid" key. + if domain == "5": + result["domain_uuid"] = 5 # Return an integer to provoke a type error. + if dbg == "inject_error" and uri in ["Blkback", "Tapdisk3", "Qdisk"]: + result["implementation"] = (uri, False) return result + def activate(self, dbg, uri, domain): """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. 
Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" result = {} diff --git a/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py index 9bea7377391..8b6436657da 100644 --- a/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py +++ b/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py @@ -42,6 +42,26 @@ def assert_type_checks(testee, methods, template_args, bad_args, caplog): assert_error(testee, caplog, bad_args, method, error_msg) +def assert_attach_type_check(testee, caplog, args, uri): + """Assert that the result of the testee matches the expected result""" + a = args.copy() + a["uri"] = uri + assert testee._dispatch("Datapath.attach", [a]) == { + "Status": "Success", + "Value": {"domain_uuid": a["domain"], "implementation": (uri, a["dbg"])}, + } + if uri == "other": + return + a["dbg"] = "inject_error" + assert_error(testee, caplog, a, "attach", "TypeError expected=string actual=False") + + +def assert_attach_type_checks(testee, caplog, args): + """Assert type checks when attach() returns Blkback, Tapdisk3, Qdisk and others""" + for uri in ["Blkback", "Tapdisk3", "Qdisk", "other"]: + assert_attach_type_check(testee, caplog, args, uri) + + def test_dispatcher(caplog, capsys): """ Test the dispatcher of the Xapi storage API datapath interface @@ -79,25 +99,16 @@ def test_dispatcher(caplog, capsys): methods = ["open"] assert_type_checks(testee, methods, args, missing, caplog) - # BUG: Datapath_test.attach() currently returns an mismatching dictionary: - # The dispatcher expects a dict with a "domain_uuid" key, but the implementation - # Datapath_test.attach() returns a dict with a "backend" key instead. - # Changing the implementation of Datapath_test.attach() will fix this issue. 
- - # This WOULD be an example expected result, BUT the implementation of - # Datapath_test.attach() returns an invalid dictionary to the dispatcher: - assert testee._dispatch("Datapath.attach", [args]) != { - "Status": "Success", - "Value": {"domain_uuid": "uuid", "implementation": ("uri", "dbg")}, - } + # Assert the dispatcher returns the example results of Datapath_test.attach(): + assert_attach_type_checks(testee, caplog, args) - # BUG: This is the internal error that Datapath_test.attach() currently triggers: - assert testee._dispatch("Datapath.attach", [args]) == { - "ErrorDescription": ["Internal_error", "'domain_uuid'"], - "Status": "Failure", - } - assert caplog.messages[0] == "caught 'domain_uuid'" - caplog.clear() + # Assert the internal error to cover the check by removing the domain argument: + bad = args.copy() + bad["domain"] = "" + assert_error(testee, caplog, bad, "attach", "'domain_uuid'") + # Assert the type check on the domain_uuid return value: + bad["domain"] = "5" + assert_error(testee, caplog, bad, "attach", "TypeError expected=string actual=5") # The other methods work as expected. 
Setup, Call, Assert: success = {"Status": "Success", "Value": {}} From 8544efe0f624b9d860ee7f60e94e5c97a8ddd2d0 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Tue, 16 Jul 2024 12:00:00 +0200 Subject: [PATCH 193/341] extauth-hook-AD.py: Delete PBIS code (PBIS was removed from XS8) Signed-off-by: Bernhard Kaindl --- python3/plugins/extauth-hook-AD.py | 49 ++----------------- python3/plugins/test_extauth_hook_AD.py | 62 ++----------------------- 2 files changed, 9 insertions(+), 102 deletions(-) diff --git a/python3/plugins/extauth-hook-AD.py b/python3/plugins/extauth-hook-AD.py index 5ca0f705846..d3e89aae8c8 100755 --- a/python3/plugins/extauth-hook-AD.py +++ b/python3/plugins/extauth-hook-AD.py @@ -23,7 +23,7 @@ import logging import logging.handlers from collections import OrderedDict -from enum import Enum + import XenAPIPlugin @@ -70,12 +70,6 @@ def run_cmd(command: "list[str]"): logger.exception("Failed to run command %s", command) -class ADBackend(Enum): - """Enum for AD backend""" - BD_PBIS = 0 - BD_WINBIND = 1 - - class ADConfig(abc.ABC): """Base class for AD configuration""" @@ -84,7 +78,6 @@ def __init__(self, path, session, args, ad_enabled=True, load_existing=True, fil self._session = session self._args = args self._lines = [] - self._backend = self._get_ad_backend() self._ad_enabled = ad_enabled self._file_mode = file_mode if load_existing and os.path.exists(self._file_path): @@ -92,14 +85,6 @@ def __init__(self, path, session, args, ad_enabled=True, load_existing=True, fil lines = file.readlines() self._lines = [l.strip() for l in lines] - def _get_ad_backend(self): - """Get active AD backend""" - if self._args.get("ad_backend", "winbind") == "pbis": - logger.debug("pbis is used as AD backend") - return ADBackend.BD_PBIS - - logger.debug("winbind is used as AD backend") - return ADBackend.BD_WINBIND @abc.abstractmethod def _apply_to_cache(self): ... 
@@ -156,11 +141,7 @@ def __init__(self, session, args, ad_enabled=True): def _apply_to_cache(self): if self._ad_enabled: - if self._backend == ADBackend.BD_PBIS: - ad_pam_module = "/lib/security/pam_lsass.so" - else: - ad_pam_module = "pam_winbind.so" - content = self.ad_pam_format.format(ad_module=ad_pam_module, + content = self.ad_pam_format.format(ad_module="pam_winbind.so", user_list=HCP_USERS, group_list=HCP_GROUPS) else: content = self.no_ad_pam @@ -202,16 +183,6 @@ def _is_pool_admin(self, subject_rec): logger.warning("subject %s does not have role", subject_rec) return False - def _format_item(self, item): - space_replacement = "+" - if self._backend == ADBackend.BD_PBIS: - if space_replacement in item: - raise ValueError( - "{} is not permitted in subject name".format(space_replacement)) - # PBIS relace space with "+", eg "ab cd" -> "ab++cd" - # PBIS pam module will reverse it back - return item.replace(" ", space_replacement) - return item def _is_responsible_for(self, subject_rec): try: @@ -248,9 +219,6 @@ def _add_upn(self, subject_rec): try: upn = subject_rec["other_config"]["subject-upn"] user, domain = upn.split(sep) - if self._backend == ADBackend.BD_PBIS: - # PBIS convert domain to UPPER case, we revert it back - domain = domain.lower() self._lines.append("{}{}{}".format(user, sep, domain)) except KeyError: logger.info("subject does not have upn %s", subject_rec) @@ -260,15 +228,12 @@ def _add_upn(self, subject_rec): def _add_subject(self, subject_rec): try: sid = subject_rec['subject_identifier'] - name = subject_rec["other_config"]["subject-name"] - formatted_name = self._format_item(name) + formatted_name = subject_rec["other_config"]["subject-name"] logger.debug("Permit user %s, Current sid is %s", formatted_name, sid) self._lines.append(formatted_name) # If the ssh key is permitted in the authorized_keys file, # The original name is compared, add UPN and original name - if self._backend == ADBackend.BD_PBIS and name != formatted_name: - 
self._lines.append(name) self._add_upn(subject_rec) # pylint: disable=broad-except except Exception as exp: @@ -287,8 +252,7 @@ def _match_subject(self, subject_rec): def _add_subject(self, subject_rec): try: sid = subject_rec['subject_identifier'] - name = self._format_item( - subject_rec["other_config"]["subject-name"]) + name = subject_rec["other_config"]["subject-name"] logger.debug("Permit group %s, Current sid is %s", name, sid) self._lines.append(name) # pylint: disable=broad-except @@ -368,10 +332,7 @@ def __init__(self, session, args, ad_enabled=True): "/etc/nsswitch.conf", session, args, ad_enabled) modules = "files sss" if ad_enabled: - if self._backend == ADBackend.BD_PBIS: - modules = "files sss lsass" - else: - modules = "files hcp winbind" + modules = "files hcp winbind" self._update_key_value("passwd", modules) self._update_key_value("group", modules) self._update_key_value("shadow", modules) diff --git a/python3/plugins/test_extauth_hook_AD.py b/python3/plugins/test_extauth_hook_AD.py index 3f0f22e40dd..eb9d1107e87 100644 --- a/python3/plugins/test_extauth_hook_AD.py +++ b/python3/plugins/test_extauth_hook_AD.py @@ -38,7 +38,7 @@ def test_run_cmd(caplog): def line_exists_in_config(lines, line): """ - Helper function to detect whether configration match expectation + Helper function to check if the configuration matches the expectation """ return any(line.split() == l.split() for l in lines) @@ -46,8 +46,6 @@ def line_exists_in_config(lines, line): domain = "conappada.local" args_bd_winbind = {'auth_type': 'AD', 'service_name': domain, 'ad_backend': 'winbind'} -args_bd_pbis = {'auth_type': 'AD', - 'service_name': domain, 'ad_backend': 'pbis'} mock_session = MagicMock() subjects = ['OpaqueRef:96ae4be5-8815-4de8-a40f-d5e5c531dda9'] @@ -56,8 +54,7 @@ def line_exists_in_config(lines, line): admin_roles = [admin_role] mock_session.xenapi.role.get_by_name_label.return_value = admin_roles -# pylint: disable=unused-argument, protected-access, 
redefined-outer-name, missing-function-docstring -# pylint: disable=too-many-arguments, missing-class-docstring, no-self-use +# pylint: disable=unused-argument, redefined-outer-name def build_user(domain_netbios, domain, name, is_admin=True): @@ -120,14 +117,6 @@ def test_ad_enabled_with_winbind(self, mock_rename, mock_chmod): enabled_keyward = "auth sufficient pam_winbind.so try_first_pass try_authtok" self.assertTrue(line_exists_in_config(static._lines, enabled_keyward)) - def test_ad_enabled_with_pbis(self, mock_rename, mock_chmod): - # pam_lsass should be used - mock_rename.side_effect = mock_rename_to_clean - static = StaticSSHPam(mock_session, args_bd_pbis) - static.apply() - enabled_keyward = "auth sufficient /lib/security/pam_lsass.so try_first_pass try_authtok" - self.assertTrue(line_exists_in_config(static._lines, enabled_keyward)) - @patch("extauth_hook_AD.ADConfig._install") class TestUsersList(TestCase): @@ -146,21 +135,12 @@ def test_permit_admin_user(self, mock_install): # Domain user with admin role should be included in config file user = build_user("CONNAPP", "CONAPPADA.LOCAL", "radmin", True) mock_session.xenapi.subject.get_record.return_value = user - dynamic = UsersList(mock_session, args_bd_pbis) + dynamic = UsersList(mock_session, args_bd_winbind) dynamic.apply() self.assertIn(r"CONNAPP\radmin", dynamic._lines) - self.assertIn(r"radmin@conappada.local", dynamic._lines) + self.assertIn(r"radmin@CONAPPADA.LOCAL", dynamic._lines) mock_install.assert_called() - def test_pbis_permit_admin_user_with_space(self, mock_install): - # Domain user name with space should be repalced by "+" with PBIS - user = build_user("CONNAPP", "conappada.local", "radmin l1", True) - mock_session.xenapi.subject.get_record.return_value = user - permit_user = r"CONNAPP\radmin++l1" - dynamic = UsersList(mock_session, args_bd_pbis) - dynamic.apply() - self.assertIn(permit_user, dynamic._lines) - mock_install.assert_called() def 
test_winbind_permit_admin_user_with_space(self, mock_install): # Domain user name with space should be surrounded by [] with winbind @@ -181,40 +161,6 @@ def test_not_permit_non_admin_user(self, mock_install): dynamic.apply() self.assertNotIn(permit_user, dynamic._lines) - def test_pbis_not_permit_pool_admin_with_plus_in_name(self, mock_install): - """ - Domain user name should not contain "+" - """ - user = build_user("CONNAPP", "conappada.local", "radm+in", True) - mock_session.xenapi.subject.get_record.return_value = user - permit_user = r"CONNAPP\radm+in" - dynamic = UsersList(mock_session, args_bd_pbis) - dynamic.apply() - self.assertNotIn(permit_user, dynamic._lines) - - def test_failed_to_add_one_admin_should_not_affact_others(self, mock_install): - """ - Failed to add one bad domain users should not affact others - """ - bad_user = build_user("CONNAPP", "conappada.local", "bad+in", True) - good_user = build_user("CONNAPP", "conappada.local", "good", True) - - mock_session_with_multi_users = MagicMock() - - subjects = ['OpaqueRef:96ae4be5-8815-4de8-a40f-d5e5c531dda9', - 'OpaqueRef:96ae4be5-8815-4de8-a40f-d5e5c531dda1'] - mock_session_with_multi_users.xenapi.subject.get_all.return_value = subjects - mock_session_with_multi_users.xenapi.subject.get_record.side_effect = [ - bad_user, good_user] - mock_session_with_multi_users.xenapi.role.get_by_name_label.return_value = admin_roles - - bad_user = r"CONNAPP\bad+in" - good_user = r"CONNAPP\good" - dynamic = UsersList(mock_session_with_multi_users, args_bd_pbis) - dynamic.apply() - self.assertIn(good_user, dynamic._lines) - self.assertNotIn(bad_user, dynamic._lines) - @patch("extauth_hook_AD.ADConfig._install") class TestGroups(TestCase): From ae5f7b029d4baf018173e04194041511bf76b2d8 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 18 Jul 2024 12:00:00 +0200 Subject: [PATCH 194/341] pytest: Run xenopsd tests, rm pytest-cov (obsolete), update pyright Also prepare removal of the old baseline SMAPv3 code: 
ocaml/xapi-storage/python/xapi/storage/api/{volume,plugin,*datapath}.py - Reduce pytest --cov-fail-under to 50% (may be completely removed) - Remove it from the directories to test. Update the minimum supported Python version for XenAPI.py to 3.6 (for XS8) for https://pypi.org/project/XenAPI/ to `python_requires = >=3.6.*, <4` Signed-off-by: Bernhard Kaindl --- .github/workflows/other.yml | 2 +- .pre-commit-config.yaml | 3 +-- pyproject.toml | 4 ++-- scripts/examples/python/setup.cfg | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 107ba9ba573..58cc7c8cdfe 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -52,7 +52,7 @@ jobs: pytest -vv -rA --cov=ocaml ocaml --cov-report term-missing --cov-report xml:.git/coverage${{matrix.python-version}}.xml - --cov-fail-under 60 + --cov-fail-under 50 env: PYTHONDEVMODE: yes PYTHONPATH: "python3:python3/stubs" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d714b01cd6e..124645bd875 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -81,7 +81,6 @@ repos: - opentelemetry-api - opentelemetry-exporter-zipkin-json - opentelemetry-sdk - - pytest-coverage - pytest-mock - mock - wrapt @@ -89,7 +88,7 @@ repos: - repo: https://github.com/RobertCraigie/pyright-python - rev: v1.1.361 + rev: v1.1.372 hooks: - id: pyright name: check that python3 tree passes pyright/VSCode check diff --git a/pyproject.toml b/pyproject.toml index b821b222804..630f6c51e25 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -260,8 +260,8 @@ addopts = "-v -ra" # xfail_strict: require to remove pytext.xfail marker when test is fixed # required_plugins: require that these plugins are installed before testing # ----------------------------------------------------------------------------- -testpaths = ["python3", "ocaml/xcp-rrdd", "ocaml/xapi-storage"] -required_plugins = ["pytest-cov", "pytest-mock"] +testpaths = 
["python3", "ocaml/xcp-rrdd", "ocaml/xenopsd"] +required_plugins = ["pytest-mock"] log_cli_level = "INFO" log_cli = true minversion = "7.0" diff --git a/scripts/examples/python/setup.cfg b/scripts/examples/python/setup.cfg index 059e6631bd1..47601de9c05 100644 --- a/scripts/examples/python/setup.cfg +++ b/scripts/examples/python/setup.cfg @@ -19,7 +19,7 @@ classifiers = [options] packages = find: -python_requires = >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4 +python_requires = >=3.6.*, <4 [bdist_wheel] # This flag says that the code is written to work on both Python 2 and Python From 97e117db2af6f158cff4f0c2a2feb47d71c8aae5 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 18 Jul 2024 07:36:38 +0100 Subject: [PATCH 195/341] Python3 CI: fix import error Signed-off-by: Stephen Cheng --- .github/workflows/other.yml | 2 +- pyproject.toml | 2 +- python3/dnf_plugins/ptoken.py | 2 +- python3/stubs/__init__.py | 0 python3/{tests => }/stubs/dnf.py | 0 python3/tests/test_dnf_plugins.py | 10 ++++++---- 6 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 python3/stubs/__init__.py rename python3/{tests => }/stubs/dnf.py (100%) diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index bc73c18338c..97548160b43 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -58,7 +58,7 @@ jobs: --cov-fail-under 0 env: PYTHONDEVMODE: yes - PYTHONPATH: "python3:python3/tests/stubs" + PYTHONPATH: "python3:python3/stubs" - name: Upload coverage report to Coveralls uses: coverallsapp/github-action@v2 diff --git a/pyproject.toml b/pyproject.toml index efdcd13494e..58743495b93 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -263,7 +263,7 @@ required_plugins = ["pytest-cov", "pytest-mock"] log_cli_level = "INFO" log_cli = true minversion = "7.0" -pythonpath = "python3/stubs:scripts/examples/python" # Allow to import the XenAPI module +pythonpath = "python3/stubs" # Allow to import the XenAPI module python_files = 
["test_*.py", "it_*.py"] python_functions = ["test_", "it_", "when_"] xfail_strict = true # is used to fail tests that are marked as xfail but pass(for TDD) diff --git a/python3/dnf_plugins/ptoken.py b/python3/dnf_plugins/ptoken.py index 75c926e13b4..c2ea73fccc8 100644 --- a/python3/dnf_plugins/ptoken.py +++ b/python3/dnf_plugins/ptoken.py @@ -15,7 +15,7 @@ def config(self): """ DNF plugin config hook, refer to https://dnf.readthedocs.io/en/latest/api_plugins.html""" try: - with open('/etc/xensource/ptoken', encoding="utf-8") as file: + with open(PTOKEN_PATH, encoding="utf-8") as file: ptoken = file.read().strip() except Exception: #pylint: disable=broad-exception-caught logging.error("Failed to open %s", PTOKEN_PATH) diff --git a/python3/stubs/__init__.py b/python3/stubs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/python3/tests/stubs/dnf.py b/python3/stubs/dnf.py similarity index 100% rename from python3/tests/stubs/dnf.py rename to python3/stubs/dnf.py diff --git a/python3/tests/test_dnf_plugins.py b/python3/tests/test_dnf_plugins.py index c7d5f587532..2f82b1eb5cb 100644 --- a/python3/tests/test_dnf_plugins.py +++ b/python3/tests/test_dnf_plugins.py @@ -3,6 +3,7 @@ import sys import json from unittest.mock import MagicMock, patch +from python3.tests.import_helper import import_file_as_module sys.modules["urlgrabber"] = MagicMock() @@ -14,8 +15,8 @@ # Some test case does not use self -from dnf_plugins import accesstoken -from dnf_plugins import ptoken +accesstoken = import_file_as_module("python3/dnf_plugins/accesstoken.py") +ptoken = import_file_as_module("python3/dnf_plugins/ptoken.py") REPO_NAME = "testrepo" @@ -31,7 +32,7 @@ def _mock_repo(a_token=None, p_token=None, baseurl=None): return mock_repo -@patch("dnf_plugins.accesstoken.urlgrabber") +@patch("accesstoken.urlgrabber") class TestAccesstoken(unittest.TestCase): """Test class for dnf access plugin""" @@ -74,7 +75,8 @@ class TestPtoken(unittest.TestCase): """Test class for 
ptoken dnf plugin""" def test_failed_to_open_ptoken_file(self): """Exception should raised if the system does not have PTOKEN_PATH""" - ptoken.PTOKEN_PATH = "/some/not/exist/path" + # Disable pyright warning as we need to set the PTOKEN_PATH to test the exception + ptoken.PTOKEN_PATH = "/some/not/exist/path" # pyright: ignore[reportAttributeAccessIssue] with self.assertRaises(Exception): ptoken.Ptoken(MagicMock(), MagicMock()).config() From 7656999d6cb3a062835c2f203592b75eec5948ff Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 22 Jul 2024 12:00:00 +0200 Subject: [PATCH 196/341] Update test storage.dummyv5/plugin.py: Base the test on the new v5 API Signed-off-by: Bernhard Kaindl --- .../volume/org.xen.xapi.storage.dummyv5/plugin.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py index e9ef122ca07..bf54820cdc4 100755 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py +++ b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummyv5/plugin.py @@ -6,15 +6,15 @@ import os import sys -import xapi.storage.api.plugin +import xapi.storage.api.v5.plugin # pylint: disable=no-name-in-module -class Implementation(xapi.storage.api.plugin.Plugin_skeleton): +class Implementation(xapi.storage.api.v5.plugin.Plugin_skeleton): - def diagnostics(self, dbg): + def diagnostics(self, dbg): # pylint: disable=unused-argument return "Dummy diagnostics" - def query(self, dbg): + def query(self, dbg): # pylint: disable=unused-argument return { "plugin": "dummy", "name": "dummy SR plugin", @@ -35,11 +35,11 @@ def query(self, dbg): if __name__ == "__main__": - cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation()) + cmd = xapi.storage.api.v5.plugin.Plugin_commandline(Implementation()) base = os.path.basename(sys.argv[0]) if base == 
'Plugin.diagnostics': cmd.diagnostics() elif base == 'Plugin.Query': cmd.query() else: - raise xapi.storage.api.plugin.Unimplemented(base) + raise xapi.storage.api.v5.plugin.Unimplemented(base) From a26ce264925c6310f948b9a675deda0c7b789b86 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 22 Jul 2024 12:00:00 +0200 Subject: [PATCH 197/341] Remove ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy Signed-off-by: Bernhard Kaindl --- ocaml/xapi-storage-script/main.ml | 5 +- .../org.xen.xapi.storage.dummy/Plugin.Query | 1 - .../Plugin.diagnostics | 1 - .../org.xen.xapi.storage.dummy/SR.attach | 1 - .../org.xen.xapi.storage.dummy/SR.create | 1 - .../org.xen.xapi.storage.dummy/SR.detach | 1 - .../volume/org.xen.xapi.storage.dummy/SR.ls | 1 - .../volume/org.xen.xapi.storage.dummy/SR.stat | 1 - .../org.xen.xapi.storage.dummy/Volume.create | 1 - .../org.xen.xapi.storage.dummy/Volume.destroy | 1 - .../org.xen.xapi.storage.dummy/Volume.stat | 1 - .../org.xen.xapi.storage.dummy/plugin.py | 44 ------------ .../volume/org.xen.xapi.storage.dummy/sr.py | 71 ------------------- .../org.xen.xapi.storage.dummy/volume.py | 64 ----------------- 14 files changed, 1 insertion(+), 193 deletions(-) delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.Query delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.diagnostics delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.attach delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.create delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.detach delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.ls delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.stat delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.create delete mode 120000 
ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.destroy delete mode 120000 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.stat delete mode 100755 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py delete mode 100755 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py delete mode 100755 ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 2c904af7a43..87956ee47fb 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1857,10 +1857,7 @@ let self_test_plugin ~root_dir plugin = failwith "self test failed" let self_test ~root_dir = - ( self_test_plugin ~root_dir "org.xen.xapi.storage.dummy" >>>= fun () -> - self_test_plugin ~root_dir "org.xen.xapi.storage.dummyv5" - ) - >>= function + self_test_plugin ~root_dir "org.xen.xapi.storage.dummyv5" >>= function | Ok () -> info "test thread shutdown cleanly" ; Async_unix.exit 0 diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.Query b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.Query deleted file mode 120000 index 96bd1391c0e..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.Query +++ /dev/null @@ -1 +0,0 @@ -plugin.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.diagnostics b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.diagnostics deleted file mode 120000 index 96bd1391c0e..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Plugin.diagnostics +++ /dev/null @@ -1 +0,0 @@ -plugin.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.attach 
b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.attach deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.attach +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.create b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.create deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.create +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.detach b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.detach deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.detach +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.ls b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.ls deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.ls +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.stat b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.stat deleted file mode 120000 index 482eaaf76a5..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/SR.stat +++ /dev/null @@ -1 +0,0 @@ -sr.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.create b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.create deleted file mode 120000 index 1d6acb7b332..00000000000 --- 
a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.create +++ /dev/null @@ -1 +0,0 @@ -volume.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.destroy b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.destroy deleted file mode 120000 index 1d6acb7b332..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.destroy +++ /dev/null @@ -1 +0,0 @@ -volume.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.stat b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.stat deleted file mode 120000 index 1d6acb7b332..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/Volume.stat +++ /dev/null @@ -1 +0,0 @@ -volume.py \ No newline at end of file diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py deleted file mode 100755 index 40e3a00911c..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/plugin.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 - -""" - Copyright (C) Citrix Systems, Inc. 
-""" - -import os -import sys -import xapi.storage.api.plugin - - -class Implementation(xapi.storage.api.plugin.Plugin_skeleton): - - def diagnostics(self, dbg): - return "Dummy diagnostics" - - def query(self, dbg): - return { - "plugin": "dummy", - "name": "dummy SR plugin", - "description": ("Dummy SR for unit tests."), - "vendor": "Citrix Systems Inc", - "copyright": "(C) 2018 Citrix Inc", - "version": "1.0", - "required_api_version": "3.0", - "features": [ - "SR_ATTACH", - "SR_DETACH", - "SR_CREATE", - "VDI_CREATE", - "VDI_DESTROY"], - "configuration": {}, - "required_cluster_stack": []} - - -if __name__ == "__main__": - cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation()) - base = os.path.basename(sys.argv[0]) - if base == 'Plugin.diagnostics': - cmd.diagnostics() - elif base == 'Plugin.Query': - cmd.query() - else: - raise xapi.storage.api.plugin.Unimplemented(base) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py deleted file mode 100755 index 82c77d891db..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/sr.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 - -""" - Copyright (C) Citrix Systems, Inc. 
-""" - -import os -import sys -import urllib.parse -import xapi.storage.api.volume - -import plugin - - -class Implementation(xapi.storage.api.volume.SR_skeleton): - - def attach(self, dbg, uri): - return "file:///tmp/dummy" - - def create(self, dbg, uri, name, description, configuration): - return - - def detach(self, dbg, sr): - urllib.parse.urlparse(sr) - return - - def ls(self, dbg, sr): - urllib.parse.urlparse(sr) - qr = plugin.Implementation().query(dbg) - return [{ - "name": qr['name'], - "description": qr['description'], - "key": "file1", - "uuid": "file1", - "read_write": True, - "virtual_size": 0, - "physical_utilisation": 0, - "uri": ["raw+file:///tmp/disk.raw"], - "keys": {}, - }] - - def stat(self, dbg, sr): - urllib.parse.urlparse(sr) - qr = plugin.Implementation().query(dbg) - return { - "sr": sr, - "name": qr['name'], - "description": qr['description'], - "total_space": 0, - "free_space": 0, - "datasources": [], - "clustered": False, - "health": ["Healthy", ""] - } - - -if __name__ == "__main__": - cmd = xapi.storage.api.volume.SR_commandline(Implementation()) - base = os.path.basename(sys.argv[0]) - if base == 'SR.attach': - cmd.attach() - elif base == 'SR.create': - cmd.create() - elif base == 'SR.detach': - cmd.detach() - elif base == 'SR.ls': - cmd.ls() - elif base == 'SR.stat': - cmd.stat() - else: - raise xapi.storage.api.volume.Unimplemented(base) diff --git a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py b/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py deleted file mode 100755 index 848c13bfd39..00000000000 --- a/ocaml/xapi-storage-script/test/volume/org.xen.xapi.storage.dummy/volume.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python3 - -""" - Copyright (C) Citrix Systems, Inc. 
-""" - -import uuid -import urllib.parse -import os -import sys -import xapi.storage.api.volume -import xapi - -import plugin - - -class Implementation(xapi.storage.api.volume.Volume_skeleton): - - def create(self, dbg, sr, name, description, size): - urllib.parse.urlparse(sr) - voluuid = str(uuid.uuid4()) - return { - "name": name, - "description": description, - "key": voluuid, - "uuid": voluuid, - "read_write": True, - "virtual_size": 0, - "physical_utilisation": 0, - "uri": ["raw+file:///tmp/disk.raw"], - "keys": {}, - } - - def destroy(self, dbg, sr, key): - urllib.parse.urlparse(sr) - return - - def stat(self, dbg, sr, key): - urllib.parse.urlparse(sr) - qr = plugin.Implementation().query(dbg) - return { - "name": qr['name'], - "description": qr['description'], - "key": key, - "uuid": key, - "read_write": True, - "virtual_size": 0, - "physical_utilisation": 0, - "uri": ["raw+file:///tmp/disk.raw"], - "keys": {}, - } - - -if __name__ == "__main__": - cmd = xapi.storage.api.volume.Volume_commandline(Implementation()) - base = os.path.basename(sys.argv[0]) - if base == "Volume.create": - cmd.create() - elif base == "Volume.destroy": - cmd.destroy() - elif base == "Volume.stat": - cmd.stat() - else: - raise xapi.storage.api.volume.Unimplemented(base) From 910289f209c125299bc4014b90c116189cb1f719 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Mon, 22 Jul 2024 12:00:00 +0200 Subject: [PATCH 198/341] Remove ocaml/xapi-storage/python/xapi/storage/api/{volume,plugin,*datapath}.py Signed-off-by: Bernhard Kaindl --- .../python/xapi/storage/api/datapath.py | 440 ----- .../python/xapi/storage/api/plugin.py | 251 --- .../python/xapi/storage/api/test_datapath.py | 138 -- .../python/xapi/storage/api/volume.py | 1429 ----------------- 4 files changed, 2258 deletions(-) delete mode 100644 ocaml/xapi-storage/python/xapi/storage/api/datapath.py delete mode 100644 ocaml/xapi-storage/python/xapi/storage/api/plugin.py delete mode 100644 
ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py delete mode 100644 ocaml/xapi-storage/python/xapi/storage/api/volume.py diff --git a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/datapath.py deleted file mode 100644 index 957b8c0362f..00000000000 --- a/ocaml/xapi-storage/python/xapi/storage/api/datapath.py +++ /dev/null @@ -1,440 +0,0 @@ -from __future__ import print_function - -import argparse -import json -import logging -import sys -import traceback - -import xapi -# pylint: disable=line-too-long,superfluous-parens,unused-argument -# pylint: disable-next=redefined-builtin # FIXME: TypeError is a custom class in xapi -from xapi import ( - InternalError, - Rpc_light_failure, - TypeError, - UnknownMethod, - UnmarshalException, - is_str, - success, -) - -# pylint: disable=invalid-name,redefined-builtin,undefined-variable -# pyright: reportUndefinedVariable=false -if sys.version_info[0] > 2: - unicode = str - -class Unimplemented(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not is_str(arg_0): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Datapath_server_dispatcher: - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. 
Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def open(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - if not('persistent' in args): - raise UnmarshalException('argument missing', 'persistent', '') - persistent = args["persistent"] - if not isinstance(persistent, bool): - raise TypeError("bool", repr(persistent)) - results = self._impl.open(dbg, uri, persistent) - return results - def attach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not is_str(domain): - raise TypeError("string", repr(domain)) - results = self._impl.attach(dbg, uri, domain) - if not is_str(results['domain_uuid']): - raise TypeError("string", repr(results['domain_uuid'])) - if results['implementation'][0] == 'Blkback': - if not 
is_str(results['implementation'][1]): - raise TypeError("string", repr(results['implementation'][1])) - elif results['implementation'][0] == 'Tapdisk3': - if not is_str(results['implementation'][1]): - raise TypeError("string", repr(results['implementation'][1])) - elif results['implementation'][0] == 'Qdisk': - if not is_str(results['implementation'][1]): - raise TypeError("string", repr(results['implementation'][1])) - return results - def activate(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not is_str(domain): - raise TypeError("string", repr(domain)) - results = self._impl.activate(dbg, uri, domain) - return results - def deactivate(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not is_str(domain): - raise TypeError("string", repr(domain)) - results = self._impl.deactivate(dbg, uri, domain) - return results - 
def detach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - if not('domain' in args): - raise UnmarshalException('argument missing', 'domain', '') - domain = args["domain"] - if not is_str(domain): - raise TypeError("string", repr(domain)) - results = self._impl.detach(dbg, uri, domain) - return results - def close(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - results = self._impl.close(dbg, uri) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "Datapath.open": - return success(self.open(args)) - elif method == "Datapath.attach": - return success(self.attach(args)) - elif method == "Datapath.activate": - return success(self.activate(args)) - elif method == "Datapath.deactivate": - return success(self.deactivate(args)) - elif method == "Datapath.detach": - return success(self.detach(args)) - elif method == "Datapath.close": - return success(self.close(args)) -class Datapath_skeleton: - """Xapi will call the functions here on VM 
start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - def __init__(self): - pass - def open(self, dbg, uri, persistent): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.open") - def attach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.attach") - def activate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.activate") - def deactivate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.deactivate") - def detach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.detach") - def close(self, dbg, uri): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. 
Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - raise Unimplemented("Datapath.close") -class Datapath_test: - """ - Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. - Every function is idempotent. Every function takes a domain parameter which allows - the implementation to track how many domains are currently using the volume. - """ - def __init__(self): - pass - def open(self, dbg, uri, persistent): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def attach(self, dbg, uri, domain): - # type:(str, str, str) -> dict[str, tuple[str, Any] | str] - """ - Return a valid results dictionary to Datapath_server_dispatcher.attach() - - The returned dict must contain the "domain_uuid" key with a string value. - The returned dict must contain the "implementation" key with two elements: - If the first element is one of "Blkback", "Tapdisk3" or "Qdisk", - the second element must be a string. Else, the dispatcher returns an error. - - See Datapath_server_dispatcher.attach() for the implementation details. - """ - # Fixed to not raise an internal error in Datapath_server_dispatcher.attach(): - result = { "domain_uuid": domain, "implementation": (uri, dbg) } - if not domain: # Provoke an internal error in the dispatcher to cover its code - result.pop("domain_uuid") # by removing the required "domain_uuid" key. - if domain == "5": - result["domain_uuid"] = 5 # Return an integer to provoke a type error. 
- if dbg == "inject_error" and uri in ["Blkback", "Tapdisk3", "Qdisk"]: - result["implementation"] = (uri, False) - return result - - def activate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def deactivate(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def detach(self, dbg, uri, domain): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result - def close(self, dbg, uri): - """Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume.""" - result = {} - return result -class Datapath_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = Datapath_server_dispatcher(self.impl) - def _parse_open(self): - """[open uri persistent] is called before a disk is attached to a VM. If persistent is true then care should be taken to persist all writes to the disk. 
If persistent is false then the implementation should configure a temporary location for writes so they can be thrown away on [close].""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[open uri persistent] is called before a disk is attached to a VM. If persistent is true then care should be taken to persist all writes to the disk. If persistent is false then the implementation should configure a temporary location for writes so they can be thrown away on [close].') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('--persistent', action='store_true', help='True means the disk data is persistent and should be preserved when the datapath is closed i.e. when a VM is shutdown or rebooted. False means the data should be thrown away when the VM is shutdown or rebooted.') - return vars(parser.parse_args()) - def _parse_attach(self): - """[attach uri domain] prepares a connection between the storage named by [uri] and the Xen domain with id [domain]. The return value is the information needed by the Xen toolstack to setup the shared-memory blkfront protocol. Note that the same volume may be simultaneously attached to multiple hosts for example over a migrate. If an implementation needs to perform an explicit handover, then it should implement [activate] and [deactivate]. 
This function is idempotent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[attach uri domain] prepares a connection between the storage named by [uri] and the Xen domain with id [domain]. The return value is the information needed by the Xen toolstack to setup the shared-memory blkfront protocol. Note that the same volume may be simultaneously attached to multiple hosts for example over a migrate. If an implementation needs to perform an explicit handover, then it should implement [activate] and [deactivate]. This function is idempotent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_activate(self): - """[activate uri domain] is called just before a VM needs to read or write its disk. This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. This function is idempotent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[activate uri domain] is called just before a VM needs to read or write its disk. 
This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. This function is idempotent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_deactivate(self): - """[deactivate uri domain] is called as soon as a VM has finished reading or writing its disk. This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. This function is idempotent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[deactivate uri domain] is called as soon as a VM has finished reading or writing its disk. This is an opportunity for an implementation which needs to perform an explicit volume handover to do it. This function is called in the migration downtime window so delays here will be noticeable to users and should be minimised. 
This function is idempotent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_detach(self): - """[detach uri domain] is called sometime after a VM has finished reading or writing its disk. This is an opportunity to clean up any resources associated with the disk. This function is called outside the migration downtime window so can be slow without affecting users. This function is idempotent. This function should never fail. If an implementation is unable to perform some cleanup right away then it should queue the action internally. Any error result represents a bug in the implementation.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[detach uri domain] is called sometime after a VM has finished reading or writing its disk. This is an opportunity to clean up any resources associated with the disk. This function is called outside the migration downtime window so can be slow without affecting users. This function is idempotent. This function should never fail. If an implementation is unable to perform some cleanup right away then it should queue the action internally. 
Any error result represents a bug in the implementation.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - parser.add_argument('domain', action='store', help='An opaque string which represents the Xen domain.') - return vars(parser.parse_args()) - def _parse_close(self): - """[close uri] is called after a disk is detached and a VM shutdown. This is an opportunity to throw away writes if the disk is not persistent.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[close uri] is called after a disk is detached and a VM shutdown. 
This is an opportunity to throw away writes if the disk is not persistent.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='A URI which represents how to access the volume disk data.') - return vars(parser.parse_args()) - def open(self): - use_json = False - try: - request = self._parse_open() - use_json = 'json' in request and request['json'] - results = self.dispatcher.open(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def attach(self): - use_json = False - try: - request = self._parse_attach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.attach(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def activate(self): - use_json = False - try: - request = self._parse_activate() - use_json = 'json' in request and request['json'] - results = self.dispatcher.activate(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def deactivate(self): - use_json = False - try: - request = self._parse_deactivate() - use_json = 'json' in request and request['json'] - results = self.dispatcher.deactivate(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def detach(self): - use_json = False - try: - request = self._parse_detach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.detach(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: 
- traceback.print_exc() - raise e - def close(self): - use_json = False - try: - request = self._parse_close() - use_json = 'json' in request and request['json'] - results = self.dispatcher.close(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class datapath_server_dispatcher: - """Demux calls to individual interface server_dispatchers""" - def __init__(self, Datapath=None): - self.Datapath = Datapath - def _dispatch(self, method, params): - try: - logging.debug("method = %s params = %s" % (method, repr(params))) - if method.startswith("Datapath") and self.Datapath: - return self.Datapath._dispatch(method, params) - raise UnknownMethod(method) - except Exception as e: - logging.info("caught %s" % e) - traceback.print_exc() - try: - # A declared (expected) failure will have a .failure() method - logging.debug("returning %s" % (repr(e.failure()))) - return e.failure() - except AttributeError: - # An undeclared (unexpected) failure is wrapped as InternalError - return (InternalError(str(e)).failure()) -class datapath_server_test(datapath_server_dispatcher): - """Create a server which will respond to all calls, returning arbitrary values. 
This is intended as a marshal/unmarshal test.""" - def __init__(self): - datapath_server_dispatcher.__init__(self, Datapath_server_dispatcher(Datapath_test())) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py b/ocaml/xapi-storage/python/xapi/storage/api/plugin.py deleted file mode 100644 index d9199a98771..00000000000 --- a/ocaml/xapi-storage/python/xapi/storage/api/plugin.py +++ /dev/null @@ -1,251 +0,0 @@ -from __future__ import print_function - -import argparse -import json -import logging -import sys -import traceback - -import xapi -# pylint: disable=line-too-long,superfluous-parens,unused-argument -# pylint: disable-next=redefined-builtin # FIXME: TypeError is a custom class in xapi -from xapi import ( - InternalError, - Rpc_light_failure, - TypeError, - UnknownMethod, - UnmarshalException, - is_str, - success, -) - -# pylint: disable=invalid-name,redefined-builtin,undefined-variable -# pyright: reportUndefinedVariable=false -if sys.version_info[0] > 2: - unicode = str - -class Unimplemented(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not is_str(arg_0): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Plugin_server_dispatcher: - """Discover properties of this implementation. 
Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def query(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - results = self._impl.query(dbg) - if not is_str(results['plugin']): - raise TypeError("string", repr(results['plugin'])) - if not is_str(results['name']): - raise TypeError("string", repr(results['name'])) - if not is_str(results['description']): - raise TypeError("string", repr(results['description'])) - if not is_str(results['vendor']): - raise TypeError("string", repr(results['vendor'])) - if not is_str(results['copyright']): - raise TypeError("string", repr(results['copyright'])) - if not is_str(results['version']): - raise TypeError("string", repr(results['version'])) - if not is_str(results['required_api_version']): - raise TypeError("string", repr(results['required_api_version'])) - if not isinstance(results['features'], list): - raise TypeError("string list", repr(results['features'])) - for tmp_1 in results['features']: - if not is_str(tmp_1): - raise TypeError("string", repr(tmp_1)) - if not isinstance(results['configuration'], dict): - raise TypeError("(string * string) list", repr(results['configuration'])) - for tmp_2 in results['configuration'].keys(): - if not is_str(tmp_2): - raise TypeError("string", repr(tmp_2)) - for tmp_2 in results['configuration'].values(): - if not is_str(tmp_2): - raise TypeError("string", repr(tmp_2)) - if not isinstance(results['required_cluster_stack'], list): - raise TypeError("string list", repr(results['required_cluster_stack'])) - 
for tmp_3 in results['required_cluster_stack']: - if not is_str(tmp_3): - raise TypeError("string", repr(tmp_3)) - return results - def ls(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - results = self._impl.ls(dbg) - if not isinstance(results, list): - raise TypeError("string list", repr(results)) - for tmp_4 in results: - if not is_str(tmp_4): - raise TypeError("string", repr(tmp_4)) - return results - def diagnostics(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - results = self._impl.diagnostics(dbg) - if not is_str(results): - raise TypeError("string", repr(results)) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "Plugin.query": - return success(self.query(args)) - elif method == "Plugin.ls": - return success(self.ls(args)) - elif method == "Plugin.diagnostics": - return success(self.diagnostics(args)) -class Plugin_skeleton: - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - def __init__(self): - pass - def query(self, dbg): - """Discover properties of this implementation. 
Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - raise Unimplemented("Plugin.query") - def ls(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - raise Unimplemented("Plugin.ls") - def diagnostics(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - raise Unimplemented("Plugin.diagnostics") -class Plugin_test: - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - def __init__(self): - pass - def query(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - result = {} - result["query_result"] = { "plugin": "string", "name": "string", "description": "string", "vendor": "string", "copyright": "string", "version": "string", "required_api_version": "string", "features": [ "string", "string" ], "configuration": { "string": "string" }, "required_cluster_stack": [ "string", "string" ] } - return result - def ls(self, dbg): - """Discover properties of this implementation. Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - result = {} - result["srs"] = [ "string", "string" ] - return result - def diagnostics(self, dbg): - """Discover properties of this implementation. 
Every implementation must support the query interface or it will not be recognised as a storage plugin by xapi.""" - result = {} - result["diagnostics"] = "string" - return result -class Plugin_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = Plugin_server_dispatcher(self.impl) - def _parse_query(self): - """Query this implementation and return its properties. This is called by xapi to determine whether it is compatible with xapi and to discover the supported features.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='Query this implementation and return its properties. This is called by xapi to determine whether it is compatible with xapi and to discover the supported features.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - return vars(parser.parse_args()) - def _parse_ls(self): - """[ls dbg]: returns a list of attached SRs""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[ls dbg]: returns a list of attached SRs') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - return vars(parser.parse_args()) - def _parse_diagnostics(self): - """Returns a printable set of backend diagnostic information. 
Implementations are encouraged to include any data which will be useful to diagnose problems. Note this data should not include personally-identifiable data as it is intended to be automatically included in bug reports.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='Returns a printable set of backend diagnostic information. Implementations are encouraged to include any data which will be useful to diagnose problems. Note this data should not include personally-identifiable data as it is intended to be automatically included in bug reports.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - return vars(parser.parse_args()) - def query(self): - use_json = False - try: - request = self._parse_query() - use_json = 'json' in request and request['json'] - results = self.dispatcher.query(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def ls(self): - use_json = False - try: - request = self._parse_ls() - use_json = 'json' in request and request['json'] - results = self.dispatcher.ls(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def diagnostics(self): - use_json = False - try: - request = self._parse_diagnostics() - use_json = 'json' in request and request['json'] - results = self.dispatcher.diagnostics(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class plugin_server_dispatcher: - """Demux 
calls to individual interface server_dispatchers""" - def __init__(self, Plugin=None): - self.Plugin = Plugin - def _dispatch(self, method, params): - try: - logging.debug("method = %s params = %s" % (method, repr(params))) - if method.startswith("Plugin") and self.Plugin: - return self.Plugin._dispatch(method, params) - raise UnknownMethod(method) - except Exception as e: - logging.info("caught %s" % e) - traceback.print_exc() - try: - # A declared (expected) failure will have a .failure() method - logging.debug("returning %s" % (repr(e.failure()))) - return e.failure() - except AttributeError: - # An undeclared (unexpected) failure is wrapped as InternalError - return (InternalError(str(e)).failure()) -class plugin_server_test(plugin_server_dispatcher): - """Create a server which will respond to all calls, returning arbitrary values. This is intended as a marshal/unmarshal test.""" - def __init__(self): - plugin_server_dispatcher.__init__(self, Plugin_server_dispatcher(Plugin_test())) diff --git a/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py b/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py deleted file mode 100644 index 8b6436657da..00000000000 --- a/ocaml/xapi-storage/python/xapi/storage/api/test_datapath.py +++ /dev/null @@ -1,138 +0,0 @@ -import logging - -import pytest - -import xapi -import xapi.storage.api.datapath - - -def internal_error(error): - """Return a dictionary with an internal error""" - return {"ErrorDescription": ["Internal_error", error], "Status": "Failure"} - - -def assert_error(testee, caplog, method_args, method, error): - """Assert that the result of the testee matches the expected error result""" - args = method_args.copy() - if method != "open": # the persistent arg is only checked for the open method - args["persistent"] = None # pass it, but with a wrong type(not used/checked) - assert testee._dispatch("Datapath." 
+ method, [args]) == internal_error(error) - assert caplog.messages[0] == "caught " + error - caplog.clear() - - -def assert_type_checks(testee, methods, template_args, bad_args, caplog): - """Assert that the result of the testee matches the expected result""" - for arg in bad_args: - # Sigh, if Python would be strongly typed, we wouldn't need this: - # Assert the type checks of the arguments - expected = "bool" if arg == "persistent" else "string" - other_type = False if expected == "string" else "str" - for actual in [None, [], (), {"dict": "val"}, 1, 1.0, str, caplog, other_type]: - bad_args = template_args.copy() - bad_args[arg] = actual - error_msg = "TypeError expected={} actual={}".format(expected, repr(actual)) - for method in methods: - assert_error(testee, caplog, bad_args, method, error_msg) - - # Remove the argument and assert the missing argument checks - bad_args.pop(arg) - error_msg = "UnmarshalException thing=argument missing ty={} desc=".format(arg) - for method in methods: - assert_error(testee, caplog, bad_args, method, error_msg) - - -def assert_attach_type_check(testee, caplog, args, uri): - """Assert that the result of the testee matches the expected result""" - a = args.copy() - a["uri"] = uri - assert testee._dispatch("Datapath.attach", [a]) == { - "Status": "Success", - "Value": {"domain_uuid": a["domain"], "implementation": (uri, a["dbg"])}, - } - if uri == "other": - return - a["dbg"] = "inject_error" - assert_error(testee, caplog, a, "attach", "TypeError expected=string actual=False") - - -def assert_attach_type_checks(testee, caplog, args): - """Assert type checks when attach() returns Blkback, Tapdisk3, Qdisk and others""" - for uri in ["Blkback", "Tapdisk3", "Qdisk", "other"]: - assert_attach_type_check(testee, caplog, args, uri) - - -def test_dispatcher(caplog, capsys): - """ - Test the dispatcher of the Xapi storage API datapath interface - - The dispatcher is a class that routes the calls to the corresponding methods - of a given 
Datapath implementation class. - """ - # Setup - caplog.set_level(logging.INFO) - - # The testee passes them to the Datapath_test class and its attach method - # is expected to return the values which we use to test the dispatcher: - args = {"dbg": "", "uri": "uri", "domain": "uuid", "persistent": True} - - # Call - - # datapath_server_test() returns an instance of the dispatcher class that - # routes the calls to the corresponding methods of the Datapath_test class: - testee = xapi.storage.api.datapath.datapath_server_test() - - # Test the argument checks of the dispatcher to identify missing arguments: - - # Assert type checks on the dbg and uri arguments - missing = ["dbg", "uri"] - methods = ["attach", "activate", "deactivate", "detach", "open", "close"] - assert_type_checks(testee, methods, args, missing, caplog) - - # Assert type checks on the missing domain argument - missing = ["domain"] - methods = ["attach", "activate", "deactivate", "detach"] - assert_type_checks(testee, methods, args, missing, caplog) - - # Assert type checks on the persistent flag for the open method - missing = ["persistent"] - methods = ["open"] - assert_type_checks(testee, methods, args, missing, caplog) - - # Assert the dispatcher returns the example results of Datapath_test.attach(): - assert_attach_type_checks(testee, caplog, args) - - # Assert the internal error to cover the check by removing the domain argument: - bad = args.copy() - bad["domain"] = "" - assert_error(testee, caplog, bad, "attach", "'domain_uuid'") - # Assert the type check on the domain_uuid return value: - bad["domain"] = "5" - assert_error(testee, caplog, bad, "attach", "TypeError expected=string actual=5") - - # The other methods work as expected. 
Setup, Call, Assert: - success = {"Status": "Success", "Value": {}} - assert testee._dispatch("Datapath.open", [args]) == success - assert testee._dispatch("Datapath.activate", [args]) == success - assert testee._dispatch("Datapath.deactivate", [args]) == success - assert testee._dispatch("Datapath.detach", [args]) == success - assert testee._dispatch("Datapath.close", [args]) == success - - # Assert that no errors were logged and no output was printed: - assert caplog.messages == [] # No messages were logged - assert capsys.readouterr().out == "" # No output was printed - assert capsys.readouterr().err == "" # No errors were printed - - -def test_exceptions(): - """Cover the code changed by using the is_str() function""" - - with pytest.raises(xapi.TypeError) as exc_info: - _ = xapi.XenAPIException(1, "params") # pylint: disable=pointless-statement - assert str(exc_info.value) == "TypeError expected=string actual=1" - - with pytest.raises(xapi.TypeError) as exc_info: - _ = xapi.storage.api.datapath.Unimplemented( - False - ) # pylint: disable=pointless-statement - assert str(exc_info.value) == "TypeError expected=string actual=False" diff --git a/ocaml/xapi-storage/python/xapi/storage/api/volume.py b/ocaml/xapi-storage/python/xapi/storage/api/volume.py deleted file mode 100644 index 0f01ed6fd97..00000000000 --- a/ocaml/xapi-storage/python/xapi/storage/api/volume.py +++ /dev/null @@ -1,1429 +0,0 @@ -from __future__ import print_function - -import argparse -import json -import logging -import sys -import traceback - -import xapi -# pylint: disable=line-too-long,superfluous-parens,unused-argument -# pylint: disable-next=redefined-builtin # FIXME: TypeError is a custom class in xapi -from xapi import ( - InternalError, - Rpc_light_failure, - TypeError, - UnknownMethod, - UnmarshalException, - is_long, - is_str, - success, -) - -# pylint: disable=invalid-name,redefined-builtin,undefined-variable -# pyright: reportUndefinedVariable=false -if sys.version_info[0] > 2: - 
long = int - unicode = str - str = bytes - -class Sr_not_attached(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Sr_not_attached", [ arg_0 ]) - if not is_str(arg_0): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class SR_does_not_exist(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "SR_does_not_exist", [ arg_0 ]) - if not is_str(arg_0): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Volume_does_not_exist(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Volume_does_not_exist", [ arg_0 ]) - if not is_str(arg_0): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Unimplemented(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ]) - if not is_str(arg_0): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Cancelled(Rpc_light_failure): - def __init__(self, arg_0): - Rpc_light_failure.__init__(self, "Cancelled", [ arg_0 ]) - if not is_str(arg_0): - raise TypeError("string", repr(arg_0)) - self.arg_0 = arg_0 -class Volume_server_dispatcher: - """Operations which operate on volumes (also known as Virtual Disk Images)""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def create(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('name' in args): - raise UnmarshalException('argument missing', 'name', '') - 
name = args["name"] - if not is_str(name): - raise TypeError("string", repr(name)) - if not('description' in args): - raise UnmarshalException('argument missing', 'description', '') - description = args["description"] - if not is_str(description): - raise TypeError("string", repr(description)) - if not('size' in args): - raise UnmarshalException('argument missing', 'size', '') - size = args["size"] - if not(is_long(size)): - raise TypeError("int64", repr(size)) - results = self._impl.create(dbg, sr, name, description, size) - if not is_str(results['key']): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not is_str(results['uuid']): - raise TypeError("string", repr(results['uuid'])) - if not is_str(results['name']): - raise TypeError("string", repr(results['name'])) - if not is_str(results['description']): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_5 in results['uri']: - if not is_str(tmp_5): - raise TypeError("string", repr(tmp_5)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_6 in results['keys'].keys(): - if not is_str(tmp_6): - raise TypeError("string", repr(tmp_6)) - for tmp_6 in results['keys'].values(): - if not is_str(tmp_6): - raise TypeError("string", repr(tmp_6)) - return results - def snapshot(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - 
raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - results = self._impl.snapshot(dbg, sr, key) - if not is_str(results['key']): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not is_str(results['uuid']): - raise TypeError("string", repr(results['uuid'])) - if not is_str(results['name']): - raise TypeError("string", repr(results['name'])) - if not is_str(results['description']): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_7 in results['uri']: - if not is_str(tmp_7): - raise TypeError("string", repr(tmp_7)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_8 in results['keys'].keys(): - if not is_str(tmp_8): - raise TypeError("string", repr(tmp_8)) - for tmp_8 in results['keys'].values(): - if not is_str(tmp_8): - raise TypeError("string", repr(tmp_8)) - return results - def clone(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise 
UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - results = self._impl.clone(dbg, sr, key) - if not is_str(results['key']): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not is_str(results['uuid']): - raise TypeError("string", repr(results['uuid'])) - if not is_str(results['name']): - raise TypeError("string", repr(results['name'])) - if not is_str(results['description']): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_9 in results['uri']: - if not is_str(tmp_9): - raise TypeError("string", repr(tmp_9)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_10 in results['keys'].keys(): - if not is_str(tmp_10): - raise TypeError("string", repr(tmp_10)) - for tmp_10 in results['keys'].values(): - if not is_str(tmp_10): - raise TypeError("string", repr(tmp_10)) - return results - def destroy(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise 
UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - results = self._impl.destroy(dbg, sr, key) - return results - def set_name(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - if not('new_name' in args): - raise UnmarshalException('argument missing', 'new_name', '') - new_name = args["new_name"] - if not is_str(new_name): - raise TypeError("string", repr(new_name)) - results = self._impl.set_name(dbg, sr, key, new_name) - return results - def set_description(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if 
not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - if not('new_description' in args): - raise UnmarshalException('argument missing', 'new_description', '') - new_description = args["new_description"] - if not is_str(new_description): - raise TypeError("string", repr(new_description)) - results = self._impl.set_description(dbg, sr, key, new_description) - return results - def set(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - if not('k' in args): - raise UnmarshalException('argument missing', 'k', '') - k = args["k"] - if not is_str(k): - raise TypeError("string", repr(k)) - if not('v' in args): - raise UnmarshalException('argument missing', 'v', '') - v = args["v"] - if not is_str(v): - raise TypeError("string", repr(v)) - results = self._impl.set(dbg, sr, key, k, v) - return results - def unset(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not 
is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - if not('k' in args): - raise UnmarshalException('argument missing', 'k', '') - k = args["k"] - if not is_str(k): - raise TypeError("string", repr(k)) - results = self._impl.unset(dbg, sr, key, k) - return results - def resize(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - if not is_str(key): - raise TypeError("string", repr(key)) - if not('new_size' in args): - raise UnmarshalException('argument missing', 'new_size', '') - new_size = args["new_size"] - if not(is_long(new_size)): - raise TypeError("int64", repr(new_size)) - results = self._impl.resize(dbg, sr, key, new_size) - return results - def stat(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('key' in args): - raise UnmarshalException('argument missing', 'key', '') - key = args["key"] - 
if not is_str(key): - raise TypeError("string", repr(key)) - results = self._impl.stat(dbg, sr, key) - if not is_str(results['key']): - raise TypeError("string", repr(results['key'])) - if results['uuid'] is not None: - if not is_str(results['uuid']): - raise TypeError("string", repr(results['uuid'])) - if not is_str(results['name']): - raise TypeError("string", repr(results['name'])) - if not is_str(results['description']): - raise TypeError("string", repr(results['description'])) - if not isinstance(results['read_write'], bool): - raise TypeError("bool", repr(results['read_write'])) - if not(is_long(results['virtual_size'])): - raise TypeError("int64", repr(results['virtual_size'])) - if not(is_long(results['physical_utilisation'])): - raise TypeError("int64", repr(results['physical_utilisation'])) - if not isinstance(results['uri'], list): - raise TypeError("string list", repr(results['uri'])) - for tmp_11 in results['uri']: - if not is_str(tmp_11): - raise TypeError("string", repr(tmp_11)) - if not isinstance(results['keys'], dict): - raise TypeError("(string * string) list", repr(results['keys'])) - for tmp_12 in results['keys'].keys(): - if not is_str(tmp_12): - raise TypeError("string", repr(tmp_12)) - for tmp_12 in results['keys'].values(): - if not is_str(tmp_12): - raise TypeError("string", repr(tmp_12)) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "Volume.create": - return success(self.create(args)) - elif method == "Volume.snapshot": - return success(self.snapshot(args)) - elif method == "Volume.clone": - return success(self.clone(args)) - elif method == "Volume.destroy": - return success(self.destroy(args)) - elif method == "Volume.set_name": - return success(self.set_name(args)) - elif method == "Volume.set_description": - return success(self.set_description(args)) - elif method == "Volume.set": - return success(self.set(args)) - 
elif method == "Volume.unset": - return success(self.unset(args)) - elif method == "Volume.resize": - return success(self.resize(args)) - elif method == "Volume.stat": - return success(self.stat(args)) -class Volume_skeleton: - """Operations which operate on volumes (also known as Virtual Disk Images)""" - def __init__(self): - pass - def create(self, dbg, sr, name, description, size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.create") - def snapshot(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.snapshot") - def clone(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.clone") - def destroy(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.destroy") - def set_name(self, dbg, sr, key, new_name): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.set_name") - def set_description(self, dbg, sr, key, new_description): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.set_description") - def set(self, dbg, sr, key, k, v): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.set") - def unset(self, dbg, sr, key, k): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.unset") - def resize(self, dbg, sr, key, new_size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.resize") - def stat(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - raise Unimplemented("Volume.stat") -class Volume_test: - """Operations which operate on 
volumes (also known as Virtual Disk Images)""" - def __init__(self): - pass - def create(self, dbg, sr, name, description, size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - return result - def snapshot(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - return result - def clone(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - return result - def destroy(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def set_name(self, dbg, sr, key, new_name): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def set_description(self, dbg, sr, key, new_description): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def set(self, dbg, sr, key, k, v): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def unset(self, dbg, sr, key, k): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - 
return result - def resize(self, dbg, sr, key, new_size): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - return result - def stat(self, dbg, sr, key): - """Operations which operate on volumes (also known as Virtual Disk Images)""" - result = {} - result["volume"] = { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } - return result -class Volume_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = Volume_server_dispatcher(self.impl) - def _parse_create(self): - """[create sr name description size] creates a new volume in [sr] with [name] and [description]. The volume will have size >= [size] i.e. it is always permissable for an implementation to round-up the volume to the nearest convenient block size""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[create sr name description size] creates a new volume in [sr] with [name] and [description]. The volume will have size >= [size] i.e. it is always permissable for an implementation to round-up the volume to the nearest convenient block size') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('name', action='store', help='A human-readable name to associate with the new disk. 
This name is intended to be short, to be a good summary of the disk.') - parser.add_argument('description', action='store', help='A human-readable description to associate with the new disk. This can be arbitrarily long, up to the general string size limit.') - parser.add_argument('size', action='store', help='A minimum size (in bytes) for the disk. Depending on the characteristics of the implementation this may be rounded up to (for example) the nearest convenient block size. The created disk will not be smaller than this size.') - return vars(parser.parse_args()) - def _parse_snapshot(self): - """[snapshot sr volume] creates a new volue which is a snapshot of [volume] in [sr]. Snapshots should never be written to; they are intended for backup/restore only. Note the name and description are copied but any extra metadata associated by [set] is not copied.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[snapshot sr volume] creates a new volue which is a snapshot of [volume] in [sr]. Snapshots should never be written to; they are intended for backup/restore only. Note the name and description are copied but any extra metadata associated by [set] is not copied.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def _parse_clone(self): - """[clone sr volume] creates a new volume which is a writable clone of [volume] in [sr]. 
Note the name and description are copied but any extra metadata associated by [set] is not copied.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[clone sr volume] creates a new volume which is a writable clone of [volume] in [sr]. Note the name and description are copied but any extra metadata associated by [set] is not copied.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def _parse_destroy(self): - """[destroy sr volume] removes [volume] from [sr]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[destroy sr volume] removes [volume] from [sr]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def _parse_set_name(self): - """[set_name sr volume new_name] changes the name of [volume]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = 
True - return jsondict - parser = argparse.ArgumentParser(description='[set_name sr volume new_name] changes the name of [volume]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('new_name', action='store', help='New name') - return vars(parser.parse_args()) - def _parse_set_description(self): - """[set_description sr volume new_description] changes the description of [volume]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set_description sr volume new_description] changes the description of [volume]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('new_description', action='store', help='New description') - return vars(parser.parse_args()) - def _parse_set(self): - """[set sr volume key value] associates [key] with [value] in the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = 
argparse.ArgumentParser(description='[set sr volume key value] associates [key] with [value] in the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('k', action='store', help='Key') - parser.add_argument('v', action='store', help='Value') - return vars(parser.parse_args()) - def _parse_unset(self): - """[unset sr volume key] removes [key] and any value associated with it from the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[unset sr volume key] removes [key] and any value associated with it from the metadata of [volume] Note these keys and values are not interpreted by the plugin; they are intended for the higher-level software only.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('k', action='store', help='Key') - return vars(parser.parse_args()) - def _parse_resize(self): - """[resize sr volume 
new_size] enlarges [volume] to be at least [new_size].""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[resize sr volume new_size] enlarges [volume] to be at least [new_size].') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - parser.add_argument('new_size', action='store', help='New disk size') - return vars(parser.parse_args()) - def _parse_stat(self): - """[stat sr volume] returns metadata associated with [volume].""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[stat sr volume] returns metadata associated with [volume].') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('key', action='store', help='The volume key') - return vars(parser.parse_args()) - def create(self): - use_json = False - try: - request = self._parse_create() - use_json = 'json' in request and request['json'] - results = self.dispatcher.create(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def 
snapshot(self): - use_json = False - try: - request = self._parse_snapshot() - use_json = 'json' in request and request['json'] - results = self.dispatcher.snapshot(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def clone(self): - use_json = False - try: - request = self._parse_clone() - use_json = 'json' in request and request['json'] - results = self.dispatcher.clone(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def destroy(self): - use_json = False - try: - request = self._parse_destroy() - use_json = 'json' in request and request['json'] - results = self.dispatcher.destroy(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_name(self): - use_json = False - try: - request = self._parse_set_name() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_name(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_description(self): - use_json = False - try: - request = self._parse_set_description() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_description(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set(self): - use_json = False - try: - request = self._parse_set() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def unset(self): - use_json = False - try: - request = self._parse_unset() - use_json = 'json' in 
request and request['json'] - results = self.dispatcher.unset(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def resize(self): - use_json = False - try: - request = self._parse_resize() - use_json = 'json' in request and request['json'] - results = self.dispatcher.resize(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def stat(self): - use_json = False - try: - request = self._parse_stat() - use_json = 'json' in request and request['json'] - results = self.dispatcher.stat(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class SR_server_dispatcher: - """Operations which act on Storage Repositories""" - def __init__(self, impl): - """impl is a proxy object whose methods contain the implementation""" - self._impl = impl - def probe(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - results = self._impl.probe(dbg, uri) - if not isinstance(results['srs'], list): - raise TypeError("7 list", repr(results['srs'])) - for tmp_13 in results['srs']: - if not is_str(tmp_13['sr']): - raise TypeError("string", repr(tmp_13['sr'])) - if not is_str(tmp_13['name']): - raise TypeError("string", repr(tmp_13['name'])) - if not is_str(tmp_13['description']): - raise TypeError("string", repr(tmp_13['description'])) - if 
not(is_long(tmp_13['free_space'])): - raise TypeError("int64", repr(tmp_13['free_space'])) - if not(is_long(tmp_13['total_space'])): - raise TypeError("int64", repr(tmp_13['total_space'])) - if not isinstance(tmp_13['datasources'], list): - raise TypeError("string list", repr(tmp_13['datasources'])) - for tmp_14 in tmp_13['datasources']: - if not is_str(tmp_14): - raise TypeError("string", repr(tmp_14)) - if not isinstance(tmp_13['clustered'], bool): - raise TypeError("bool", repr(tmp_13['clustered'])) - if tmp_13['health'][0] == 'Healthy': - if not is_str(tmp_13['health'][1]): - raise TypeError("string", repr(tmp_13['health'][1])) - elif tmp_13['health'][0] == 'Recovering': - if not is_str(tmp_13['health'][1]): - raise TypeError("string", repr(tmp_13['health'][1])) - if not isinstance(results['uris'], list): - raise TypeError("string list", repr(results['uris'])) - for tmp_15 in results['uris']: - if not is_str(tmp_15): - raise TypeError("string", repr(tmp_15)) - return results - def create(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - if not('name' in args): - raise UnmarshalException('argument missing', 'name', '') - name = args["name"] - if not is_str(name): - raise TypeError("string", repr(name)) - if not('description' in args): - raise UnmarshalException('argument missing', 'description', '') - description = args["description"] - if not is_str(description): - raise TypeError("string", repr(description)) - if not('configuration' in args): - raise UnmarshalException('argument missing', 'configuration', 
'') - configuration = args["configuration"] - if not isinstance(configuration, dict): - raise TypeError("(string * string) list", repr(configuration)) - for tmp_16 in configuration.keys(): - if not is_str(tmp_16): - raise TypeError("string", repr(tmp_16)) - for tmp_16 in configuration.values(): - if not is_str(tmp_16): - raise TypeError("string", repr(tmp_16)) - results = self._impl.create(dbg, uri, name, description, configuration) - return results - def attach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('uri' in args): - raise UnmarshalException('argument missing', 'uri', '') - uri = args["uri"] - if not is_str(uri): - raise TypeError("string", repr(uri)) - results = self._impl.attach(dbg, uri) - if not is_str(results): - raise TypeError("string", repr(results)) - return results - def detach(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - results = self._impl.detach(dbg, sr) - return results - def destroy(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not 
is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - results = self._impl.destroy(dbg, sr) - return results - def stat(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - results = self._impl.stat(dbg, sr) - if not is_str(results['sr']): - raise TypeError("string", repr(results['sr'])) - if not is_str(results['name']): - raise TypeError("string", repr(results['name'])) - if not is_str(results['description']): - raise TypeError("string", repr(results['description'])) - if not(is_long(results['free_space'])): - raise TypeError("int64", repr(results['free_space'])) - if not(is_long(results['total_space'])): - raise TypeError("int64", repr(results['total_space'])) - if not isinstance(results['datasources'], list): - raise TypeError("string list", repr(results['datasources'])) - for tmp_17 in results['datasources']: - if not is_str(tmp_17): - raise TypeError("string", repr(tmp_17)) - if not isinstance(results['clustered'], bool): - raise TypeError("bool", repr(results['clustered'])) - if results['health'][0] == 'Healthy': - if not is_str(results['health'][1]): - raise TypeError("string", repr(results['health'][1])) - elif results['health'][0] == 'Recovering': - if not is_str(results['health'][1]): - raise TypeError("string", repr(results['health'][1])) - return results - def set_name(self, args): - """type-check inputs, call implementation, type-check outputs and 
return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('new_name' in args): - raise UnmarshalException('argument missing', 'new_name', '') - new_name = args["new_name"] - if not is_str(new_name): - raise TypeError("string", repr(new_name)) - results = self._impl.set_name(dbg, sr, new_name) - return results - def set_description(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not is_str(sr): - raise TypeError("string", repr(sr)) - if not('new_description' in args): - raise UnmarshalException('argument missing', 'new_description', '') - new_description = args["new_description"] - if not is_str(new_description): - raise TypeError("string", repr(new_description)) - results = self._impl.set_description(dbg, sr, new_description) - return results - def ls(self, args): - """type-check inputs, call implementation, type-check outputs and return""" - if not isinstance(args, dict): - raise UnmarshalException('arguments', 'dict', repr(args)) - if not('dbg' in args): - raise UnmarshalException('argument missing', 'dbg', '') - dbg = args["dbg"] - if not is_str(dbg): - raise TypeError("string", repr(dbg)) - if not('sr' in args): - raise UnmarshalException('argument missing', 'sr', '') - sr = args["sr"] - if not 
is_str(sr): - raise TypeError("string", repr(sr)) - results = self._impl.ls(dbg, sr) - if not isinstance(results, list): - raise TypeError("8 list", repr(results)) - for tmp_18 in results: - if not is_str(tmp_18['key']): - raise TypeError("string", repr(tmp_18['key'])) - if tmp_18['uuid'] is not None: - if not is_str(tmp_18['uuid']): - raise TypeError("string", repr(tmp_18['uuid'])) - if not is_str(tmp_18['name']): - raise TypeError("string", repr(tmp_18['name'])) - if not is_str(tmp_18['description']): - raise TypeError("string", repr(tmp_18['description'])) - if not isinstance(tmp_18['read_write'], bool): - raise TypeError("bool", repr(tmp_18['read_write'])) - if not(is_long(tmp_18['virtual_size'])): - raise TypeError("int64", repr(tmp_18['virtual_size'])) - if not(is_long(tmp_18['physical_utilisation'])): - raise TypeError("int64", repr(tmp_18['physical_utilisation'])) - if not isinstance(tmp_18['uri'], list): - raise TypeError("string list", repr(tmp_18['uri'])) - for tmp_19 in tmp_18['uri']: - if not is_str(tmp_19): - raise TypeError("string", repr(tmp_19)) - if not isinstance(tmp_18['keys'], dict): - raise TypeError("(string * string) list", repr(tmp_18['keys'])) - for tmp_20 in tmp_18['keys'].keys(): - if not is_str(tmp_20): - raise TypeError("string", repr(tmp_20)) - for tmp_20 in tmp_18['keys'].values(): - if not is_str(tmp_20): - raise TypeError("string", repr(tmp_20)) - return results - def _dispatch(self, method, params): - """type check inputs, call implementation, type check outputs and return""" - args = params[0] - if method == "SR.probe": - return success(self.probe(args)) - elif method == "SR.create": - return success(self.create(args)) - elif method == "SR.attach": - return success(self.attach(args)) - elif method == "SR.detach": - return success(self.detach(args)) - elif method == "SR.destroy": - return success(self.destroy(args)) - elif method == "SR.stat": - return success(self.stat(args)) - elif method == "SR.set_name": - return 
success(self.set_name(args)) - elif method == "SR.set_description": - return success(self.set_description(args)) - elif method == "SR.ls": - return success(self.ls(args)) -class SR_skeleton: - """Operations which act on Storage Repositories""" - def __init__(self): - pass - def probe(self, dbg, uri): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.probe") - def create(self, dbg, uri, name, description, configuration): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.create") - def attach(self, dbg, uri): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.attach") - def detach(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.detach") - def destroy(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.destroy") - def stat(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.stat") - def set_name(self, dbg, sr, new_name): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.set_name") - def set_description(self, dbg, sr, new_description): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.set_description") - def ls(self, dbg, sr): - """Operations which act on Storage Repositories""" - raise Unimplemented("SR.ls") -class SR_test: - """Operations which act on Storage Repositories""" - def __init__(self): - pass - def probe(self, dbg, uri): - """Operations which act on Storage Repositories""" - result = {} - result["result"] = { "srs": [ { "sr": "string", "name": "string", "description": "string", "free_space": long(0), "total_space": long(0), "datasources": [ "string", "string" ], "clustered": True, "health": None }, { "sr": "string", "name": "string", "description": "string", "free_space": long(0), "total_space": long(0), "datasources": [ "string", "string" ], "clustered": True, "health": None } ], 
"uris": [ "string", "string" ] } - return result - def create(self, dbg, uri, name, description, configuration): - """Operations which act on Storage Repositories""" - result = {} - return result - def attach(self, dbg, uri): - """Operations which act on Storage Repositories""" - result = {} - result["sr"] = "string" - return result - def detach(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - return result - def destroy(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - return result - def stat(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - result["sr"] = { "sr": "string", "name": "string", "description": "string", "free_space": long(0), "total_space": long(0), "datasources": [ "string", "string" ], "clustered": True, "health": None } - return result - def set_name(self, dbg, sr, new_name): - """Operations which act on Storage Repositories""" - result = {} - return result - def set_description(self, dbg, sr, new_description): - """Operations which act on Storage Repositories""" - result = {} - return result - def ls(self, dbg, sr): - """Operations which act on Storage Repositories""" - result = {} - result["volumes"] = [ { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } }, { "key": "string", "uuid": None, "name": "string", "description": "string", "read_write": True, "virtual_size": long(0), "physical_utilisation": long(0), "uri": [ "string", "string" ], "keys": { "string": "string" } } ] - return result -class SR_commandline(): - """Parse command-line arguments and call an implementation.""" - def __init__(self, impl): - self.impl = impl - self.dispatcher = SR_server_dispatcher(self.impl) - def _parse_probe(self): - """[probe uri]: looks for existing SRs on the storage device""" - # in --json 
mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[probe uri]: looks for existing SRs on the storage device') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='The Storage Repository URI') - return vars(parser.parse_args()) - def _parse_create(self): - """[create uri name description configuration]: creates a fresh SR""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[create uri name description configuration]: creates a fresh SR') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='The Storage Repository URI') - parser.add_argument('name', action='store', help='Human-readable name for the SR') - parser.add_argument('description', action='store', help='Human-readable description for the SR') - parser.add_argument('--configuration', default={}, nargs=2, action=xapi.ListAction, help='Plugin-specific configuration which describes where and how to create the storage repository. This may include the physical block device name, a remote NFS server and path or an RBD storage pool.') - return vars(parser.parse_args()) - def _parse_attach(self): - """[attach uri]: attaches the SR to the local host. 
Once an SR is attached then volumes may be manipulated.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[attach uri]: attaches the SR to the local host. Once an SR is attached then volumes may be manipulated.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('uri', action='store', help='The Storage Repository URI') - return vars(parser.parse_args()) - def _parse_detach(self): - """[detach sr]: detaches the SR, clearing up any associated resources. Once the SR is detached then volumes may not be manipulated.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[detach sr]: detaches the SR, clearing up any associated resources. Once the SR is detached then volumes may not be manipulated.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def _parse_destroy(self): - """[destroy sr]: destroys the [sr] and deletes any volumes associated with it. 
Note that an SR must be attached to be destroyed; otherwise Sr_not_attached is thrown.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[destroy sr]: destroys the [sr] and deletes any volumes associated with it. Note that an SR must be attached to be destroyed; otherwise Sr_not_attached is thrown.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def _parse_stat(self): - """[stat sr] returns summary metadata associated with [sr]. Note this call does not return details of sub-volumes, see SR.ls.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[stat sr] returns summary metadata associated with [sr]. 
Note this call does not return details of sub-volumes, see SR.ls.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def _parse_set_name(self): - """[set_name sr new_name] changes the name of [sr]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set_name sr new_name] changes the name of [sr]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('new_name', action='store', help='The new name of the SR') - return vars(parser.parse_args()) - def _parse_set_description(self): - """[set_description sr new_description] changes the description of [sr]""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[set_description sr new_description] changes the description of [sr]') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - parser.add_argument('new_description', 
action='store', help='The new description for the SR') - return vars(parser.parse_args()) - def _parse_ls(self): - """[ls sr] returns a list of volumes contained within an attached SR.""" - # in --json mode we don't have any other arguments - if ('--json' in sys.argv or '-j' in sys.argv): - jsondict = json.loads(sys.stdin.readline(),) - jsondict['json'] = True - return jsondict - parser = argparse.ArgumentParser(description='[ls sr] returns a list of volumes contained within an attached SR.') - parser.add_argument('-j', '--json', action='store_const', const=True, default=False, help='Read json from stdin, print json to stdout', required=False) - parser.add_argument('dbg', action='store', help='Debug context from the caller') - parser.add_argument('sr', action='store', help='The Storage Repository') - return vars(parser.parse_args()) - def probe(self): - use_json = False - try: - request = self._parse_probe() - use_json = 'json' in request and request['json'] - results = self.dispatcher.probe(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def create(self): - use_json = False - try: - request = self._parse_create() - use_json = 'json' in request and request['json'] - results = self.dispatcher.create(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def attach(self): - use_json = False - try: - request = self._parse_attach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.attach(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def detach(self): - use_json = False - try: - request = self._parse_detach() - use_json = 'json' in request and request['json'] - results = self.dispatcher.detach(request) - print(json.dumps(results)) - except Exception 
as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def destroy(self): - use_json = False - try: - request = self._parse_destroy() - use_json = 'json' in request and request['json'] - results = self.dispatcher.destroy(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def stat(self): - use_json = False - try: - request = self._parse_stat() - use_json = 'json' in request and request['json'] - results = self.dispatcher.stat(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_name(self): - use_json = False - try: - request = self._parse_set_name() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_name(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def set_description(self): - use_json = False - try: - request = self._parse_set_description() - use_json = 'json' in request and request['json'] - results = self.dispatcher.set_description(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e - def ls(self): - use_json = False - try: - request = self._parse_ls() - use_json = 'json' in request and request['json'] - results = self.dispatcher.ls(request) - print(json.dumps(results)) - except Exception as e: - if use_json: - xapi.handle_exception(e) - else: - traceback.print_exc() - raise e -class volume_server_dispatcher: - """Demux calls to individual interface server_dispatchers""" - def __init__(self, Volume=None, SR=None): - self.Volume = Volume - self.SR = SR - def _dispatch(self, method, params): - try: - logging.debug("method = %s params = %s" % (method, repr(params))) - if method.startswith("Volume") and 
self.Volume: - return self.Volume._dispatch(method, params) - elif method.startswith("SR") and self.SR: - return self.SR._dispatch(method, params) - raise UnknownMethod(method) - except Exception as e: - logging.info("caught %s" % e) - traceback.print_exc() - try: - # A declared (expected) failure will have a .failure() method - logging.debug("returning %s" % (repr(e.failure()))) - return e.failure() - except AttributeError: - # An undeclared (unexpected) failure is wrapped as InternalError - return (InternalError(str(e)).failure()) -class volume_server_test(volume_server_dispatcher): - """Create a server which will respond to all calls, returning arbitrary values. This is intended as a marshal/unmarshal test.""" - def __init__(self): - volume_server_dispatcher.__init__(self, Volume_server_dispatcher(Volume_test()), SR_server_dispatcher(SR_test())) \ No newline at end of file From 724a1c094fe2729882d15f3e6c0148df9ea6e15e Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 24 Jul 2024 06:02:13 +0100 Subject: [PATCH 199/341] CP-49148: perfmon.service is not loaded When testing the feature/py3 branch, xapi failed to restart due to perfmon.service not being loaded. The root cause is that: In this PR: https://github.com/xapi-project/xen-api/pull/5767, we moved perfmon.service from `scripts/Makefile` to `python3/Makefile` In `scripts/Makefile`, `IPROG` is defined as: IPROG=./install.sh 755 IDATA=./install.sh 644 While in `python3/Makefile`, `IPROG` is defined as: IPROG=install -m 755 IDATA=install -m 644 And the purpose of `install.sh` is to replace strings in *.service like: @OPTDIR@ -> ${OPTDIR} So in python3/Makefile, we didn't replace these strings, which then led to the error.
Signed-off-by: Stephen Cheng --- python3/Makefile | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/python3/Makefile b/python3/Makefile index 0600c90646b..514d21e5cbd 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -1,21 +1,22 @@ include ../config.mk -IPROG=install -m 755 -IDATA=install -m 644 +# To replace strings in *.service like: @OPTDIR@ -> ${OPTDIR} +IPROG=../scripts/install.sh 755 +IDATA=../scripts/install.sh 644 SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") DNF_PLUGIN_DIR=dnf-plugins install: # Create destination directories using install -m 755 -d: - $(IPROG) -d $(DESTDIR)$(OPTDIR)/bin - $(IPROG) -d $(DESTDIR)$(SITE3_DIR) - $(IPROG) -d $(DESTDIR)$(LIBEXECDIR) - $(IPROG) -d $(DESTDIR)$(PLUGINDIR) - $(IPROG) -d $(DESTDIR)/etc/sysconfig - $(IPROG) -d $(DESTDIR)/usr/lib/systemd/system - $(IPROG) -d $(DESTDIR)$(EXTENSIONDIR) - $(IPROG) -d $(DESTDIR)$(SITE3_DIR)/$(DNF_PLUGIN_DIR) + install -m 755 -d $(DESTDIR)$(OPTDIR)/bin + install -m 755 -d $(DESTDIR)$(SITE3_DIR) + install -m 755 -d $(DESTDIR)$(LIBEXECDIR) + install -m 755 -d $(DESTDIR)$(PLUGINDIR) + install -m 755 -d $(DESTDIR)/etc/sysconfig + install -m 755 -d $(DESTDIR)/usr/lib/systemd/system + install -m 755 -d $(DESTDIR)$(EXTENSIONDIR) + install -m 755 -d $(DESTDIR)$(SITE3_DIR)/$(DNF_PLUGIN_DIR) $(IDATA) packages/inventory.py $(DESTDIR)$(SITE3_DIR)/ $(IDATA) packages/observer.py $(DESTDIR)$(SITE3_DIR)/ From 04377fe1b9654d502ba5f60010f852eb92511345 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Tue, 30 Jul 2024 07:32:00 +0100 Subject: [PATCH 200/341] CP-49148: Fix ambiguous python shebang for XS9 When building xapi for XS9, ran into errors: *** ERROR: ambiguous python shebang in /opt/xensource/libexec/restore-sr-metadata.py: #!/usr/bin/python. Change it to python3 (or python2) explicitly. Explicitly use python3. 
Signed-off-by: Stephen Cheng --- python3/libexec/restore-sr-metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python3/libexec/restore-sr-metadata.py b/python3/libexec/restore-sr-metadata.py index 4bbb9fe55af..7fa4e92aa18 100644 --- a/python3/libexec/restore-sr-metadata.py +++ b/python3/libexec/restore-sr-metadata.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 # Restore SR metadata and VDI names from an XML file # (c) Anil Madhavapeddy, Citrix Systems Inc, 2008 From 2facf218941867b4bd0496ddc1383785a619a79d Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 31 Jul 2024 09:52:44 +0100 Subject: [PATCH 201/341] CP-49148: fix pylint warning This `import errno` was added by feature/py3 branch to fix pytype issues. But in the master branch, there was also a change to fix pytype issues. Use master code and remove the import. Signed-off-by: Stephen Cheng --- scripts/examples/python/XenAPI/XenAPI.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/examples/python/XenAPI/XenAPI.py b/scripts/examples/python/XenAPI/XenAPI.py index 9670502ffa9..e8ed75d0cb4 100644 --- a/scripts/examples/python/XenAPI/XenAPI.py +++ b/scripts/examples/python/XenAPI/XenAPI.py @@ -54,7 +54,6 @@ # OF THIS SOFTWARE. 
# -------------------------------------------------------------------- -import errno import gettext import os import socket From 81f12401daec568e76c87afb95e3639cc48244c4 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 31 Jul 2024 11:16:33 +0100 Subject: [PATCH 202/341] CP-49148: Remove unused xc.py - Use python3 shebang for pytype_reporter.py and test_usb_scan.py - Remove unused xc.py Signed-off-by: Stephen Cheng --- python3/tests/test_usb_scan.py | 2 +- pytype_reporter.py | 2 +- scripts/xc.py | 12 ------------ 3 files changed, 2 insertions(+), 14 deletions(-) delete mode 100644 scripts/xc.py diff --git a/python3/tests/test_usb_scan.py b/python3/tests/test_usb_scan.py index ad72c0cd928..8b886194c74 100644 --- a/python3/tests/test_usb_scan.py +++ b/python3/tests/test_usb_scan.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # unittest for usb_scan.py diff --git a/pytype_reporter.py b/pytype_reporter.py index 4e7d91f172b..b94ed948786 100755 --- a/pytype_reporter.py +++ b/pytype_reporter.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """GitHub action workflow Runner for pytype which works also locally without GitHub""" import json import re diff --git a/scripts/xc.py b/scripts/xc.py deleted file mode 100644 index 25723e2e7e0..00000000000 --- a/scripts/xc.py +++ /dev/null @@ -1,12 +0,0 @@ - -class xc : - def __init__(self): - self.d = {"XenServer" : "SDK"} - self.s = "SDK" - def readconsolering(self): - return self.s - def physinfo(self): - return self.d - def xeninfo(self): - return self.d - From 946ca371546a76349005d0f7d552c4853bd0e099 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Wed, 31 Jul 2024 12:21:32 +0100 Subject: [PATCH 203/341] CP-49148: Convert rrdd-example.py to python3 Signed-off-by: Stephen Cheng --- ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py | 31 +++++++++++---------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py 
b/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py index e25e0ddf016..17f7c2398f4 100755 --- a/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py +++ b/ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py @@ -1,18 +1,19 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -import rrdd, os +import os +import rrdd if __name__ == "__main__": - # Create a proxy for communicating with xcp-rrdd. - api = rrdd.API(plugin_id="host_mem") - while True: - # Wait until 0.5 seconds before xcp-rrdd is going to read the output file. - api.wait_until_next_reading(neg_shift=.5) - # Collect measurements. - cmd = "free -k | grep Mem | awk '{print $2, $3, $4}'" - vs = os.popen(cmd).read().strip().split() - # Tell the proxy which datasources should be exposed in this iteration. - api.set_datasource("used_mem", vs[1], min_val=0, max_val=vs[0], units="KB") - api.set_datasource("free_mem", vs[2], min_val=0, max_val=vs[0], units="KB") - # Write all required information into a file about to be read by xcp-rrdd. - api.update() + # Create a proxy for communicating with xcp-rrdd. + api = rrdd.API(plugin_id="host_mem") + while True: + # Wait until 0.5 seconds before xcp-rrdd is going to read the output file. + api.wait_until_next_reading(neg_shift=.5) + # Collect measurements. + cmd = "free -k | grep Mem | awk '{print $2, $3, $4}'" + vs = os.popen(cmd).read().strip().split() + # Tell the proxy which datasources should be exposed in this iteration. + api.set_datasource("used_mem", vs[1], min_val=0, max_val=vs[0], units="KB") + api.set_datasource("free_mem", vs[2], min_val=0, max_val=vs[0], units="KB") + # Write all required information into a file about to be read by xcp-rrdd. 
+ api.update() From 8899586a4956491ac389a0491f4f47f22b0ed87e Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 1 Aug 2024 11:29:16 +0100 Subject: [PATCH 204/341] CP-46112: Remove python2 compatible code from XenAPI Signed-off-by: Stephen Cheng --- scripts/Makefile | 4 ---- scripts/examples/python/XenAPI/XenAPI.py | 21 ++++++--------------- scripts/examples/python/XenAPIPlugin.py | 7 ++----- 3 files changed, 8 insertions(+), 24 deletions(-) diff --git a/scripts/Makefile b/scripts/Makefile index 87302dca48f..98d00f7c0b6 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -134,10 +134,6 @@ install: $(IPROG) host-backup-restore/host-backup $(DESTDIR)$(LIBEXECDIR) $(IPROG) host-backup-restore/host-restore $(DESTDIR)$(LIBEXECDIR) # example/python -ifneq ($(BUILD_PY2), NO) - $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE_DIR)/ - $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE_DIR)/ -endif $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE3_DIR)/ sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ diff --git a/scripts/examples/python/XenAPI/XenAPI.py b/scripts/examples/python/XenAPI/XenAPI.py index e8ed75d0cb4..722a9e6e965 100644 --- a/scripts/examples/python/XenAPI/XenAPI.py +++ b/scripts/examples/python/XenAPI/XenAPI.py @@ -1,3 +1,4 @@ +#!/usr/bin/python3 # Copyright (c) Citrix Systems, Inc. # All rights reserved. 
# @@ -58,13 +59,8 @@ import os import socket import sys - -if sys.version_info[0] == 2: - import httplib as httplib - import xmlrpclib as xmlrpclib -else: - import http.client as httplib - import xmlrpc.client as xmlrpclib +import http.client as httplib +import xmlrpc.client as xmlrpclib otel = False try: @@ -150,15 +146,10 @@ class Session(xmlrpclib.ServerProxy): def __init__(self, uri, transport=None, encoding=None, verbose=False, allow_none=True, ignore_ssl=False): - if sys.version_info[0] > 2: - # this changed to be a 'bool' in Python3 - verbose = bool(verbose) - allow_none = bool(allow_none) + verbose = bool(verbose) + allow_none = bool(allow_none) - # Fix for CA-172901 (+ Python 2.4 compatibility) - # Fix for context=ctx ( < Python 2.7.9 compatibility) - if not (sys.version_info[0] <= 2 and sys.version_info[1] <= 7 and sys.version_info[2] <= 9 ) \ - and ignore_ssl: + if ignore_ssl: import ssl ctx = ssl._create_unverified_context() xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, diff --git a/scripts/examples/python/XenAPIPlugin.py b/scripts/examples/python/XenAPIPlugin.py index 82f1f2f8531..43744432843 100644 --- a/scripts/examples/python/XenAPIPlugin.py +++ b/scripts/examples/python/XenAPIPlugin.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """XenAPI python plugin boilerplate code""" # pylint: disable=invalid-name # Module name "XenAPIPlugin" doesn't conform to snake_case naming style @@ -9,11 +10,7 @@ import sys import XenAPI - -if sys.version_info[0] == 2: - import xmlrpclib -else: - import xmlrpc.client as xmlrpclib +import xmlrpc.client as xmlrpclib class Failure(Exception): """Provide compatibility with plugins written against the XenServer 5.5 API""" From 0e5ff89ce84b6ace81e257a2da69f1b38d458b3c Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Thu, 1 Aug 2024 13:11:01 +0100 Subject: [PATCH 205/341] CP-46112: Relocate XenAPI to python3 directory Signed-off-by: Stephen Cheng --- .github/workflows/release.yml | 2 +- .gitignore | 8 ++++---- 
Makefile | 4 ++-- ocaml/sdk-gen/README.md | 2 +- pyproject.toml | 3 +++ python3/Makefile | 5 +++++ {scripts/examples/python => python3/examples}/Makefile | 0 {scripts/examples/python => python3/examples}/README.md | 2 +- .../examples/python => python3/examples}/XenAPI/XenAPI.py | 0 .../python => python3/examples}/XenAPI/__init__.py | 0 .../examples/python => python3/examples}/XenAPIPlugin.py | 0 .../examples/python => python3/examples}/pyproject.toml | 2 +- {scripts/examples/python => python3/examples}/setup.cfg | 0 scripts/Makefile | 4 ---- 14 files changed, 18 insertions(+), 14 deletions(-) rename {scripts/examples/python => python3/examples}/Makefile (100%) rename {scripts/examples/python => python3/examples}/README.md (77%) rename {scripts/examples/python => python3/examples}/XenAPI/XenAPI.py (100%) rename {scripts/examples/python => python3/examples}/XenAPI/__init__.py (100%) rename {scripts/examples/python => python3/examples}/XenAPIPlugin.py (100%) rename {scripts/examples/python => python3/examples}/pyproject.toml (89%) rename {scripts/examples/python => python3/examples}/setup.cfg (100%) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9a051ef15f9..830d94e969e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,7 +33,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: XenAPI - path: scripts/examples/python/dist/ + path: python3/examples/dist/ build-sdks: name: Build and upload SDK artifacts diff --git a/.gitignore b/.gitignore index 967e463c15f..b519eb9cb39 100644 --- a/.gitignore +++ b/.gitignore @@ -16,10 +16,10 @@ config.mk # python packaging **/__pycache__/ **/*.pyc -scripts/examples/python/setup.py -scripts/examples/python/XenAPI.egg-info/ -scripts/examples/python/build/ -scripts/examples/python/dist/ +python3/examples/setup.py +python3/examples/XenAPI.egg-info/ +python3/examples/build/ +python3/examples/dist/ # ignore file needed for building the SDK 
ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml diff --git a/Makefile b/Makefile index 7bff3e3aca6..a73d939a8b2 100644 --- a/Makefile +++ b/Makefile @@ -121,7 +121,7 @@ sdk: cp -r _build/default/ocaml/sdk-gen/java/autogen/* $(XAPISDK)/java cp -r _build/default/ocaml/sdk-gen/powershell/autogen/* $(XAPISDK)/powershell cp -r _build/default/ocaml/sdk-gen/go/autogen/* $(XAPISDK)/go - cp scripts/examples/python/XenAPI/XenAPI.py $(XAPISDK)/python + cp python3/examples/XenAPI/XenAPI.py $(XAPISDK)/python sh ocaml/sdk-gen/windows-line-endings.sh $(XAPISDK)/csharp sh ocaml/sdk-gen/windows-line-endings.sh $(XAPISDK)/powershell @@ -136,7 +136,7 @@ sdk-build-java: sdk cd _build/install/default/xapi/sdk/java && mvn -f xen-api/pom.xml -B clean package install -Drevision=0.0 python: - $(MAKE) -C scripts/examples/python build + $(MAKE) -C python3/examples build doc-json: dune exec --profile=$(PROFILE) -- ocaml/idl/json_backend/gen_json.exe -destdir $(XAPIDOC)/jekyll diff --git a/ocaml/sdk-gen/README.md b/ocaml/sdk-gen/README.md index fa45a1c3803..1cb1f2a7238 100644 --- a/ocaml/sdk-gen/README.md +++ b/ocaml/sdk-gen/README.md @@ -9,7 +9,7 @@ XenAPI's datamodel. The generation code is written in OCaml and is contained in this directory. The Python module is not auto-generated, it can be found at -[XenAPI.py](../../scripts/examples/python/XenAPI/XenAPI.py). +[XenAPI.py](../../python3/examples/XenAPI/XenAPI.py). To compile the generated source code, follow the instructions in the corresponding `README` files. 
diff --git a/pyproject.toml b/pyproject.toml index 630f6c51e25..8a7ca2dc9fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -227,6 +227,9 @@ exclude = [ "ocaml/xcp-rrdd/scripts/rrdd/rrdd.py", "ocaml/xcp-rrdd/scripts/rrdd/rrdd-example.py", "python3/packages/observer.py", + "python3/examples/XenAPI/XenAPI.py", + "python3/examples/XenAPIPlugin.py", + ] diff --git a/python3/Makefile b/python3/Makefile index 13bc58546c0..81735c73e16 100644 --- a/python3/Makefile +++ b/python3/Makefile @@ -52,6 +52,11 @@ install: $(IDATA) perfmon/perfmon.service $(DESTDIR)/usr/lib/systemd/system/perfmon.service $(IPROG) perfmon/sysconfig-perfmon $(DESTDIR)/etc/sysconfig/perfmon +# example/python + $(IDATA) examples/XenAPIPlugin.py $(DESTDIR)$(SITE3_DIR)/ + sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py + $(IDATA) examples/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ + # poweron $(IPROG) poweron/wlan.py $(DESTDIR)$(PLUGINDIR)/wlan.py diff --git a/scripts/examples/python/Makefile b/python3/examples/Makefile similarity index 100% rename from scripts/examples/python/Makefile rename to python3/examples/Makefile diff --git a/scripts/examples/python/README.md b/python3/examples/README.md similarity index 77% rename from scripts/examples/python/README.md rename to python3/examples/README.md index 7761002ac70..f896978fee3 100644 --- a/scripts/examples/python/README.md +++ b/python3/examples/README.md @@ -7,7 +7,7 @@ To install the package, enable the virtual environment where it's going to be us Examples -------- -The [examples](https://github.com/xapi-project/xen-api/tree/master/scripts/examples/python) will not work unless they have been placed in the same directory as `XenAPI.py` or `XenAPI` package from PyPI has been installed (`pip install XenAPI`) +The [examples](https://github.com/xapi-project/xen-api/tree/master/python3/examples) will not work unless they have been placed in the same directory as `XenAPI.py` or `XenAPI` package from PyPI has 
been installed (`pip install XenAPI`) Packaging ========= diff --git a/scripts/examples/python/XenAPI/XenAPI.py b/python3/examples/XenAPI/XenAPI.py similarity index 100% rename from scripts/examples/python/XenAPI/XenAPI.py rename to python3/examples/XenAPI/XenAPI.py diff --git a/scripts/examples/python/XenAPI/__init__.py b/python3/examples/XenAPI/__init__.py similarity index 100% rename from scripts/examples/python/XenAPI/__init__.py rename to python3/examples/XenAPI/__init__.py diff --git a/scripts/examples/python/XenAPIPlugin.py b/python3/examples/XenAPIPlugin.py similarity index 100% rename from scripts/examples/python/XenAPIPlugin.py rename to python3/examples/XenAPIPlugin.py diff --git a/scripts/examples/python/pyproject.toml b/python3/examples/pyproject.toml similarity index 89% rename from scripts/examples/python/pyproject.toml rename to python3/examples/pyproject.toml index f556f2539ab..5a429e1a0c7 100644 --- a/scripts/examples/python/pyproject.toml +++ b/python3/examples/pyproject.toml @@ -3,4 +3,4 @@ requires = ["setuptools >= 38.6.0", "setuptools_scm[toml]", "wheel"] build-backend = "setuptools.build_meta" [tool.setuptools_scm] -root = "../../.." +root = "../.." 
diff --git a/scripts/examples/python/setup.cfg b/python3/examples/setup.cfg similarity index 100% rename from scripts/examples/python/setup.cfg rename to python3/examples/setup.cfg diff --git a/scripts/Makefile b/scripts/Makefile index 98d00f7c0b6..15ad2c62d51 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -133,10 +133,6 @@ install: # host-backup-restore $(IPROG) host-backup-restore/host-backup $(DESTDIR)$(LIBEXECDIR) $(IPROG) host-backup-restore/host-restore $(DESTDIR)$(LIBEXECDIR) -# example/python - $(IDATA) examples/python/XenAPIPlugin.py $(DESTDIR)$(SITE3_DIR)/ - sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py - $(IDATA) examples/python/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ # YUM plugins $(IPROG) yum-plugins/accesstoken.py $(DESTDIR)$(YUMPLUGINDIR) $(IDATA) yum-plugins/accesstoken.conf $(DESTDIR)$(YUMPLUGINCONFDIR) From 17da7407513ef64606fb111c342857d721e76c40 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 2 Aug 2024 05:28:59 +0100 Subject: [PATCH 206/341] Fix pylint warnings Signed-off-by: Stephen Cheng --- python3/examples/XenAPIPlugin.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/python3/examples/XenAPIPlugin.py b/python3/examples/XenAPIPlugin.py index 43744432843..3d8d4871e4d 100644 --- a/python3/examples/XenAPIPlugin.py +++ b/python3/examples/XenAPIPlugin.py @@ -8,9 +8,8 @@ from __future__ import print_function import sys - -import XenAPI import xmlrpc.client as xmlrpclib +import XenAPI class Failure(Exception): """Provide compatibility with plugins written against the XenServer 5.5 API""" From eddf404f93bea75bacc9afbb26b758b9b6afeed3 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 2 Aug 2024 06:46:01 +0100 Subject: [PATCH 207/341] Delete unused sed command in python3 Makefile Signed-off-by: Stephen Cheng --- python3/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/python3/Makefile b/python3/Makefile index 81735c73e16..fed125c01bb 100644 --- 
a/python3/Makefile +++ b/python3/Makefile @@ -54,7 +54,6 @@ install: # example/python $(IDATA) examples/XenAPIPlugin.py $(DESTDIR)$(SITE3_DIR)/ - sed -i 's/#!\/usr\/bin\/python/#!\/usr\/bin\/python3/' $(DESTDIR)$(SITE3_DIR)/XenAPIPlugin.py $(IDATA) examples/XenAPI/XenAPI.py $(DESTDIR)$(SITE3_DIR)/ From fcd2edf42fb2ce5c2b75be1b72afd943d36510a4 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Fri, 2 Aug 2024 10:30:37 +0100 Subject: [PATCH 208/341] Remove `universal=1` from setup.cfg to only support python3 Signed-off-by: Stephen Cheng --- python3/examples/setup.cfg | 6 ------ 1 file changed, 6 deletions(-) diff --git a/python3/examples/setup.cfg b/python3/examples/setup.cfg index 47601de9c05..9a07df89928 100644 --- a/python3/examples/setup.cfg +++ b/python3/examples/setup.cfg @@ -20,9 +20,3 @@ classifiers = [options] packages = find: python_requires = >=3.6.*, <4 - -[bdist_wheel] -# This flag says that the code is written to work on both Python 2 and Python -# 3. If at all possible, it is good practice to do this. If you cannot, you -# will need to generate wheels for each Python version that you support. 
-universal=1 From 02a7154a67aa6feeeefce551007428fa0f32493b Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 7 Aug 2024 12:00:00 +0200 Subject: [PATCH 209/341] xenopsd: Add pytest for common.VIF.get_locking_mode() Signed-off-by: Bernhard Kaindl --- .../xenopsd/scripts/test_common_class_vif.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 ocaml/xenopsd/scripts/test_common_class_vif.py diff --git a/ocaml/xenopsd/scripts/test_common_class_vif.py b/ocaml/xenopsd/scripts/test_common_class_vif.py new file mode 100644 index 00000000000..006d1966f6d --- /dev/null +++ b/ocaml/xenopsd/scripts/test_common_class_vif.py @@ -0,0 +1,78 @@ +"""Test ocaml/xenopsd/scripts/common.VIF.get_locking_mode()""" + +from unittest.mock import patch # to check the arguments passed to send_to_syslog() + +import pytest # for pytest.parametrize to run the same test with different parameters + +import common # Tested module + + +# Mock class to simulate the object containing the get_locking_mode method +class VifMockSubclass(common.VIF): + """Mock class to simulate a VIF object containing the get_locking_mode method""" + + def __init__(self, json): # pylint: disable=super-init-not-called + """Do not call the parent constructor, it would open a file""" + self.json = json + + def get_mac(self): + return "00:11:22:33:44:55" # Expected MAC address + + +@pytest.mark.parametrize( + # Call the test case 3 times with two args: + # inp: input for VIF.get_locking_mode() + # expected_output: expected output of the get_locking_mode method + # Asserted with: + # assert expected_output == get_locking_mode(input) + "input_params, expected_output", + [ + # Happy path tests + ( + # locked + { # input + "locking_mode": [ + "locked", + {"ipv4": ["1.1.1.1"], "ipv6": ["fe80::1"]}, + ] + }, # expected output + { + "mac": "00:11:22:33:44:55", + "locking_mode": "locked", + "ipv4_allowed": ["1.1.1.1"], + "ipv6_allowed": ["fe80::1"], + }, + ), + ( + # unlocked + {"locking_mode": 
"unlocked"}, + { + "mac": "00:11:22:33:44:55", + "locking_mode": "unlocked", + "ipv4_allowed": [], + "ipv6_allowed": [], + }, + ), + ( + {}, # no locking_mode + { + "mac": "00:11:22:33:44:55", + "locking_mode": "", + "ipv4_allowed": [], + "ipv6_allowed": [], + }, + ), + ], +) +def test_get_locking_mode(input_params, expected_output): + """Test VIF.get_locking_mode() using the VIF class test parameters defined above.""" + + # Act: Get the locking mode configuration for the input params from the VIF object: + with patch("common.send_to_syslog") as send_to_syslog: + test_result = VifMockSubclass(input_params).get_locking_mode() + + # Assert the expected output and the expected call to send_to_syslog(): + assert test_result == expected_output + send_to_syslog.assert_called_once_with( + "Got locking config: " + repr(expected_output) + ) From 2e883b7f35b910387da59e905c27cc41d48af5a9 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Sat, 3 Aug 2024 09:54:29 +0800 Subject: [PATCH 210/341] Removed shebang from XenAPI.py and XenAPIPlugin.py as they are library code. - Removed shebang from XenAPI.py and XenAPIPlugin.py - Modified setup.cfg of python_requires version - Removed unused `dune clean` command from python Makefile Co-authored-by: Pau Ruiz Safont Signed-off-by: Stephen Cheng --- python3/examples/Makefile | 1 - python3/examples/XenAPI/XenAPI.py | 1 - python3/examples/XenAPIPlugin.py | 1 - python3/examples/setup.cfg | 2 +- 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/python3/examples/Makefile b/python3/examples/Makefile index 251f747250d..ac84bf6ba77 100644 --- a/python3/examples/Makefile +++ b/python3/examples/Makefile @@ -8,5 +8,4 @@ build: SETUPTOOLS_SCM_PRETEND_VERSION=$(XAPI_VERSION) python -m build --sdist . 
clean: - dune clean rm -rf dist/ build/ XenAPI.egg-info/ diff --git a/python3/examples/XenAPI/XenAPI.py b/python3/examples/XenAPI/XenAPI.py index 722a9e6e965..e37f8813b6e 100644 --- a/python3/examples/XenAPI/XenAPI.py +++ b/python3/examples/XenAPI/XenAPI.py @@ -1,4 +1,3 @@ -#!/usr/bin/python3 # Copyright (c) Citrix Systems, Inc. # All rights reserved. # diff --git a/python3/examples/XenAPIPlugin.py b/python3/examples/XenAPIPlugin.py index 3d8d4871e4d..49998457783 100644 --- a/python3/examples/XenAPIPlugin.py +++ b/python3/examples/XenAPIPlugin.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """XenAPI python plugin boilerplate code""" # pylint: disable=invalid-name # Module name "XenAPIPlugin" doesn't conform to snake_case naming style diff --git a/python3/examples/setup.cfg b/python3/examples/setup.cfg index 9a07df89928..b2c23c40369 100644 --- a/python3/examples/setup.cfg +++ b/python3/examples/setup.cfg @@ -19,4 +19,4 @@ classifiers = [options] packages = find: -python_requires = >=3.6.*, <4 +python_requires = >=3.6, <4 From c8d9d1322e43f49b2383ce170dd5379f35f90a60 Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Sat, 3 Aug 2024 03:54:10 +0100 Subject: [PATCH 211/341] Remove python2 related CI Signed-off-by: Stephen Cheng --- .github/workflows/main.yml | 2 +- .github/workflows/other.yml | 17 +---------------- 2 files changed, 2 insertions(+), 17 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d4bf28aaab2..5ee2ee8da05 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -45,7 +45,7 @@ jobs: - name: Make install smoketest run: | opam exec -- make install DESTDIR=$(mktemp -d) - opam exec -- make install DESTDIR=$(mktemp -d) BUILD_PY2=NO + opam exec -- make install DESTDIR=$(mktemp -d) - name: Check disk space run: df -h || true diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 58cc7c8cdfe..c4042638922 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ 
-18,7 +18,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["2.7", "3.11"] + python-version: ["3.11"] steps: - name: Checkout code uses: actions/checkout@v4 @@ -39,24 +39,11 @@ jobs: - uses: pre-commit/action@v3.0.1 name: Run pre-commit checks (no spaces at end of lines, etc) - if: ${{ matrix.python-version != '2.7' }} with: extra_args: --all-files --verbose --hook-stage commit env: SKIP: no-commit-to-branch - - name: Run Pytest for python 2 and get code coverage - if: ${{ matrix.python-version == '2.7' }} - run: > - pip install enum future mock pytest-coverage pytest-mock && - pytest -vv -rA --cov=ocaml ocaml - --cov-report term-missing - --cov-report xml:.git/coverage${{matrix.python-version}}.xml - --cov-fail-under 50 - env: - PYTHONDEVMODE: yes - PYTHONPATH: "python3:python3/stubs" - - name: Upload coverage report to Coveralls uses: coverallsapp/github-action@v2 with: @@ -66,7 +53,6 @@ jobs: parallel: true - uses: dciborow/action-pylint@0.1.0 - if: ${{ matrix.python-version != '2.7' }} with: reporter: github-pr-review level: warning @@ -75,7 +61,6 @@ jobs: continue-on-error: true - name: Run pytype checks - if: ${{ matrix.python-version != '2.7' }} run: pip install pandas pytype toml && ./pytype_reporter.py env: PR_NUMBER: ${{ github.event.number }} From e7c9da840a6800a646c0414fa07dcaa455602a70 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 7 Aug 2024 12:00:00 +0200 Subject: [PATCH 212/341] xenopsd: Fix warnings: remove inner function, use isinstance Signed-off-by: Bernhard Kaindl --- ocaml/xenopsd/scripts/common.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/ocaml/xenopsd/scripts/common.py b/ocaml/xenopsd/scripts/common.py index af8666ce62c..323b5ba06eb 100755 --- a/ocaml/xenopsd/scripts/common.py +++ b/ocaml/xenopsd/scripts/common.py @@ -192,28 +192,33 @@ def get_external_ids(self): results["xs-network-uuid"] = self.json["extra_private_keys"]["network-uuid"] results["attached-mac"] = 
self.get_mac() return results + def get_locking_mode(self): - def get_words(value, separator): - if string.strip(value) == "": - return [] - else: - return string.split(value, separator) + """ + Get the locking mode configuration for the VIF. + + :returns dict: A dictionary containing the locking mode configuration with keys: + - mac: The MAC address + - locking_mode: The locking mode + - ipv4_allowed: List of IPv4 addresses allowed + - ipv6_allowed: List of IPv6 addresses allowed + """ results = { "mac": self.get_mac(), "locking_mode": "", "ipv4_allowed": [], - "ipv6_allowed": [] + "ipv6_allowed": [], } if "locking_mode" in self.json: - if type(self.json["locking_mode"]) is list: - # Must be type=locked here + if isinstance(self.json["locking_mode"], list): + # Must be type=locked and have keys for allowed ipv4 and ipv6 addresses results["locking_mode"] = self.json["locking_mode"][0].lower() - locked_params=self.json["locking_mode"][1] + locked_params = self.json["locking_mode"][1] results["ipv4_allowed"] = locked_params["ipv4"] results["ipv6_allowed"] = locked_params["ipv6"] else: results["locking_mode"] = self.json["locking_mode"].lower() - send_to_syslog("Got locking config: %s" % (repr(results))) + send_to_syslog("Got locking config: " + repr(results)) return results class Interface: From f4c808eea5d092d5a649467c12bf8e7ba1b4d2ec Mon Sep 17 00:00:00 2001 From: Stephen Cheng Date: Sun, 4 Aug 2024 09:59:15 +0800 Subject: [PATCH 213/341] Remove duplicated line. 
Co-authored-by: Pau Ruiz Safont Signed-off-by: Stephen Cheng --- .github/workflows/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5ee2ee8da05..79ce257d7f2 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -45,7 +45,6 @@ jobs: - name: Make install smoketest run: | opam exec -- make install DESTDIR=$(mktemp -d) - opam exec -- make install DESTDIR=$(mktemp -d) - name: Check disk space run: df -h || true From fd913f6bf43a545ae0a1d6160528f31eaeea0e1b Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 8 Aug 2024 12:00:00 +0200 Subject: [PATCH 214/341] xenopsd: remove the orphaned qemu-vif-script (qemu backend was removed) In 2017, the qemu and libvirt backends were removed: https://github.com/xapi-project/xen-api/commit/90815e5fcad8a523d9f448eb6f885c54a9c2a955 With it, the use of qemu-vif-script was removed. Remove it as well. Signed-off-by: Bernhard Kaindl --- Makefile | 1 - ocaml/xenopsd/scripts/make-custom-xenopsd.conf | 1 - ocaml/xenopsd/scripts/qemu-vif-script | 15 --------------- ocaml/xenopsd/xenopsd.conf | 3 --- 4 files changed, 20 deletions(-) delete mode 100755 ocaml/xenopsd/scripts/qemu-vif-script diff --git a/Makefile b/Makefile index a73d939a8b2..337e4dad88c 100644 --- a/Makefile +++ b/Makefile @@ -237,7 +237,6 @@ install: build doc sdk doc-json install -D ./ocaml/xenopsd/scripts/block $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/block install -D ./ocaml/xenopsd/scripts/xen-backend.rules $(DESTDIR)/$(ETCDIR)/udev/rules.d/xen-backend.rules install -D ./ocaml/xenopsd/scripts/tap $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/tap - install -D ./ocaml/xenopsd/scripts/qemu-vif-script $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/qemu-vif-script install -D ./ocaml/xenopsd/scripts/setup-vif-rules $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/setup-vif-rules install -D ./_build/install/default/bin/pvs-proxy-ovs-setup $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/pvs-proxy-ovs-setup (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln 
-s pvs-proxy-ovs-setup setup-pvs-proxy-rules) diff --git a/ocaml/xenopsd/scripts/make-custom-xenopsd.conf b/ocaml/xenopsd/scripts/make-custom-xenopsd.conf index b49610f0e9a..59f52269157 100755 --- a/ocaml/xenopsd/scripts/make-custom-xenopsd.conf +++ b/ocaml/xenopsd/scripts/make-custom-xenopsd.conf @@ -41,7 +41,6 @@ vif-script=${XENOPSD_LIBEXECDIR}/vif vif-xl-script=${XENOPSD_LIBEXECDIR}/vif vbd-script=${XENOPSD_LIBEXECDIR}/block vbd-xl-script=${XENOPSD_LIBEXECDIR}/block -qemu-vif-script=${XENOPSD_LIBEXECDIR}/qemu-vif-script setup-vif-rules=${XENOPSD_LIBEXECDIR}/setup-vif-rules sockets-group=$group qemu-wrapper=${QEMU_WRAPPER_DIR}/qemu-wrapper diff --git a/ocaml/xenopsd/scripts/qemu-vif-script b/ocaml/xenopsd/scripts/qemu-vif-script deleted file mode 100755 index a8fe976e3a1..00000000000 --- a/ocaml/xenopsd/scripts/qemu-vif-script +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 - - -from common import * -import sys - -if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage:", file=sys.stderr) - print(" %s " % sys.argv[0], file=sys.stderr) - sys.exit(1) - name = sys.argv[1] - send_to_syslog("setting up interface %s" % name) - i = Interface(name) - i.online() diff --git a/ocaml/xenopsd/xenopsd.conf b/ocaml/xenopsd/xenopsd.conf index 94fcafefbd0..e80194c1f55 100644 --- a/ocaml/xenopsd/xenopsd.conf +++ b/ocaml/xenopsd/xenopsd.conf @@ -61,9 +61,6 @@ disable-logging-for=http tracing tracing_export # Path to the vbd backend script # vbd-xl-script=/usr/lib/xcp/scripts/block -# Path to the qemu vif script -# qemu-vif-script=/etc/xcp/scripts/qemu-vif-script - # Path to the PCI FLR script # pci-flr-script=/opt/xensource/libexec/pci-flr From 8e6d4bf265b957607549e7c3a1add0afc9e2a139 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 7 Aug 2024 12:00:00 +0200 Subject: [PATCH 215/341] xenopsd: remove the orphaned common.Interface.online() method With qemu-vif-script removed, common.Interface.online() is orphaned, remove it. 
Signed-off-by: Bernhard Kaindl --- ocaml/xenopsd/scripts/common.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/ocaml/xenopsd/scripts/common.py b/ocaml/xenopsd/scripts/common.py index 323b5ba06eb..52157c2dd07 100755 --- a/ocaml/xenopsd/scripts/common.py +++ b/ocaml/xenopsd/scripts/common.py @@ -228,17 +228,3 @@ def __init__(self, vif_name, uuid, devid): self.vif = VIF(vif_name, uuid, int(devid)) def get_vif(self): return self.vif - def online(self): - v = self.get_vif() - mode = v.get_mode() - for (key, value) in v.get_ethtool(): - set_ethtool(mode, self.name, key, value) - set_mtu(mode, self.name, v.get_mtu()) - add_to_bridge(mode, self.name, v.get_bridge(), v.get_address(), v.get_external_ids()) - add_vif_rules(self.name) - set_promiscuous(mode, self.name, v.get_promiscuous()) - -#def add(mode, dev, bridge, address, external_ids): -# add_to_bridge(mode, dev, bridge, address, external_ids) - - From 7e356779d630889836436b3108b832ba5d9acf06 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 7 Aug 2024 12:00:00 +0200 Subject: [PATCH 216/341] xenopsd: as Interface.online() is removed, remove get_ethtool() too Signed-off-by: Bernhard Kaindl --- ocaml/xenopsd/scripts/common.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/ocaml/xenopsd/scripts/common.py b/ocaml/xenopsd/scripts/common.py index 52157c2dd07..26adb00c0d2 100755 --- a/ocaml/xenopsd/scripts/common.py +++ b/ocaml/xenopsd/scripts/common.py @@ -154,18 +154,7 @@ def get_bridge(self): return network[1] def get_address(self): return "fe:ff:ff:ff:ff:ff" - def get_ethtool(self): - results = [] - for (k, v) in self.json["other_config"]: - if k.startswith("ethtool-"): - k = k[len("ethtool-"):] - if v == "true" or v == "on": - results.append(k, True) - elif v == "false" or v == "off": - results.append(k, False) - else: - send_to_syslog("VIF %s/%d: ignoring ethtool argument %s=%s (use true/false)" % (self.vm_uuid, self.devid, k, v)) - return results + def 
get_mac(self): return self.json["mac"] def get_mtu(self): From 806a49ccb10f0ff82ed60d1088d679c486b8eb30 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 7 Aug 2024 12:00:00 +0200 Subject: [PATCH 217/341] loop+blkback example: Fix pytype warning: Handle None(not found) Signed-off-by: Bernhard Kaindl --- .../examples/datapath/loop+blkback/datapath.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py index f076b700a6f..10b1959e05c 100755 --- a/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py +++ b/ocaml/xapi-storage/python/examples/datapath/loop+blkback/datapath.py @@ -75,20 +75,22 @@ def attach(self, dbg, uri, domain): call(dbg, cmd) loop = Loop.from_path(dbg, file_path) + if not loop: + return {} return {"implementations": [ [ - 'XenDisk', + "XenDisk", { - 'backend_type': 'vbd', - 'params': loop.block_device(), - 'extra': {} + "backend_type": "vbd", + "params": loop.block_device(), + "extra": {} } ], [ - 'BlockDevice', + "BlockDevice", { - 'path': loop.block_device() + "path": loop.block_device() } ] ]} From 50ae66c9257e0c7bfbbf2a554237eeed2c5b6842 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Thu, 8 Aug 2024 12:00:00 +0200 Subject: [PATCH 218/341] pytype: Enable checking on ocaml dirs, fix pythonpath Signed-off-by: Bernhard Kaindl --- pyproject.toml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8a7ca2dc9fc..1881ebbd350 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -291,13 +291,12 @@ expected_to_fail = [ [tool.pytype] inputs = [ - # Python 3 "python3/", "ocaml/xcp-rrdd", - - # To be added later, - # when converted to Python3-compatible syntax: - # "ocaml/xapi-storage/python", + "ocaml/xenopsd", + "ocaml/xapi-storage/python", + "ocaml/xapi-storage-script", + "ocaml/vhd-tool", ] disable = [ 
# Reduce noise from python2 scripts(import yum, xenfsimage, xcp, urlgrabber) @@ -305,4 +304,4 @@ disable = [ ] platform = "linux" # Allow pytype to find the XenAPI module, the rrdd module and python3 modules: -pythonpath = "scripts/examples/python:.:scripts:scripts/plugins:scripts/examples" +pythonpath = "python3/examples:." From dda5205d1ab700a8ad5a9f9aa92caeb3e138c18f Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Tue, 23 Apr 2024 08:29:23 +0100 Subject: [PATCH 219/341] Use templates to generate `Types.java` Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/main.ml | 397 ++++++++++---------- ocaml/sdk-gen/java/templates/Types.mustache | 256 +++++++++++++ 2 files changed, 452 insertions(+), 201 deletions(-) create mode 100644 ocaml/sdk-gen/java/templates/Types.mustache diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index 483d8689db1..8e7777bcae0 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -246,8 +246,6 @@ let get_method_params_for_xml message params = else "this.ref" :: List.map f params -let rec range = function 0 -> [] | i -> range (i - 1) @ [i] - (* Here is the main method generating function.*) let gen_method file cls message params async_version = let return_type = @@ -658,235 +656,137 @@ import java.io.IOException; (* Generate Marshalling Class *) (*This generates the special case code for marshalling the snapshot field in an Event.Record*) -let generate_snapshot_hack file = - fprintf file "\n" ; - fprintf file "\n" ; - fprintf file " Object a,b;\n" ; - fprintf file " a=map.get(\"snapshot\");\n" ; - fprintf file " switch(%s(record.clazz))\n" - (get_marshall_function switch_enum) ; - fprintf file " {\n" ; - List.iter - (fun x -> - fprintf file " case %17s: b = %25s(a); break;\n" - (String.uppercase_ascii x) - (get_marshall_function (Record x)) - ) - (List.map - (fun x -> x.name) - (List.filter (fun x -> not (class_is_empty x)) classes) - ) ; - fprintf file - " default: throw new 
RuntimeException(\"Internal error in \ - auto-generated code whilst unmarshalling event snapshot\");\n" ; - fprintf file " }\n" ; - fprintf file " record.snapshot = b;\n" -let gen_marshall_record_field file prefix field = +let generate_snapshot_hack = + {| + Object a,b; + a = map.get("snapshot"); + switch(|} + ^ get_marshall_function switch_enum + ^ {|(record.clazz)){ +|} + ^ String.concat "\n" + (List.map + (fun x -> + " case " + ^ String.uppercase_ascii x + ^ ": b = " + ^ get_marshall_function (Record x) + ^ "(a); break;" + ) + (List.map + (fun x -> x.name) + (List.filter (fun x -> not (class_is_empty x)) classes) + ) + ) + ^ {| + default: + throw new RuntimeException("Internal error in auto-generated code whilst unmarshalling event snapshot"); + } + record.snapshot = b;|} + +let gen_marshall_record_field prefix field = let ty = get_marshall_function field.ty in let name = String.concat "_" (List.rev (field.field_name :: prefix)) in let name' = camel_case name in - fprintf file " record.%s = %s(map.get(\"%s\"));\n" name' ty name + " record." ^ name ^ " = " ^ ty ^ "(map.get(\"" ^ name' ^ "\"));\n" -let rec gen_marshall_record_namespace file prefix (name, contents) = - List.iter (gen_marshall_record_contents file (name :: prefix)) contents +let rec gen_marshall_record_namespace prefix (name, contents) = + String.concat "\n" + (List.map (gen_marshall_record_contents (name :: prefix)) contents) -and gen_marshall_record_contents file prefix = function +and gen_marshall_record_contents prefix = function | Field f -> - gen_marshall_record_field file prefix f + gen_marshall_record_field prefix f | Namespace (n, cs) -> - gen_marshall_record_namespace file prefix (n, cs) ; - () - -(*Every type which may be returned by a function may also be the result of the*) -(* corresponding asynchronous task. We therefore need to generate corresponding*) -(* marshalling functions which can take the raw xml of the tasks result field*) -(* and turn it into the corresponding type. 
Luckily, the only things returned by*) -(* asynchronous tasks are object references and strings, so rather than implementing*) -(* the general recursive structure we'll just make one for each of the classes*) -(* that's been registered as a marshall-needing type*) - -let generate_reference_task_result_func file clstr = - fprintf file - {| /** - * Attempt to convert the {@link Task}'s result to a {@link %s} object. - * Will return null if the method cannot fetch a valid value from the {@link Task} object. - * @param task The task from which to fetch the result. - * @param connection The connection - * @return the instantiated object if a valid value was found, null otherwise. - * @throws BadServerResponse Thrown if the response from the server contains an invalid status. - * @throws XenAPIException if the call failed. - * @throws IOException if an error occurs during a send or receive. This includes cases where a payload is invalid JSON. - */ -|} - clstr ; - fprintf file - " public static %s to%s(Task task, Connection connection) throws \ - IOException {\n" - clstr clstr ; - fprintf file - " return Types.to%s(parseResult(task.getResult(connection)));\n" - clstr ; - fprintf file " }\n" ; - fprintf file "\n" - -let gen_task_result_func file = function - | Ref ty -> - generate_reference_task_result_func file (class_case ty) - | _ -> - () + gen_marshall_record_namespace prefix (n, cs) (*don't generate for complicated types. 
They're not needed.*) -let rec gen_marshall_body file = function +let rec gen_marshall_body = function | SecretString | String -> - fprintf file " return (String) object;\n" + "return (String) object;\n" | Int -> - fprintf file " return Long.valueOf((String) object);\n" + "return Long.valueOf((String) object);\n" | Float -> - fprintf file " return (Double) object;\n" + "return (Double) object;\n" | Bool -> - fprintf file " return (Boolean) object;\n" + "return (Boolean) object;\n" | DateTime -> - fprintf file - " try {\n\ - \ return (Date) object;\n\ - \ } catch (ClassCastException e){\n\ - \ //Occasionally the date comes back as an ocaml float \ - rather than\n\ - \ //in the xmlrpc format! Catch this and convert.\n\ - \ return (new Date((long) (1000*Double.parseDouble((String) \ - object))));\n\ - \ }\n" + {| + try { + return (Date) object; + } catch (ClassCastException e){ + //Occasionally the date comes back as an ocaml float rather than + //in the xmlrpc format! Catch this and convert. 
+ return (new Date((long) (1000*Double.parseDouble((String) object)))); + }|} | Ref ty -> - fprintf file " return new %s((String) object);\n" (class_case ty) + "return new" ^ class_case ty ^ "((String) object);\n" | Enum (name, _) -> - fprintf file " try {\n" ; - fprintf file - " return %s.valueOf(((String) \ - object).toUpperCase().replace('-','_'));\n" - (class_case name) ; - fprintf file " } catch (IllegalArgumentException ex) {\n" ; - fprintf file " return %s.UNRECOGNIZED;\n" (class_case name) ; - fprintf file " }\n" + {|try { + return |} + ^ class_case name + ^ {|.valueOf(((String) object).toUpperCase().replace('-','_')); + } catch (IllegalArgumentException ex) { + return |} + ^ class_case name + ^ {|.UNRECOGNIZED; + }|} | Set ty -> let ty_name = get_java_type ty in let marshall_fn = get_marshall_function ty in - fprintf file " Object[] items = (Object[]) object;\n" ; - fprintf file " Set<%s> result = new LinkedHashSet<>();\n" ty_name ; - fprintf file " for(Object item: items) {\n" ; - fprintf file " %s typed = %s(item);\n" ty_name marshall_fn ; - fprintf file " result.add(typed);\n" ; - fprintf file " }\n" ; - fprintf file " return result;\n" + {|Object[] items = (Object[]) object; + Set<|} + ^ ty_name + ^ {|> result = new LinkedHashSet<>(); + for(Object item: items) { + |} + ^ ty_name + ^ {| typed = |} + ^ marshall_fn + ^ {|(item); + result.add(typed); + } + return result;|} | Map (ty, ty') -> let ty_name = get_java_type ty in let ty_name' = get_java_type ty' in let marshall_fn = get_marshall_function ty in let marshall_fn' = get_marshall_function ty' in - fprintf file " var map = (Map)object;\n" ; - fprintf file " var result = new HashMap<%s,%s>();\n" ty_name - ty_name' ; - fprintf file " for(var entry: map.entrySet()) {\n" ; - fprintf file " var key = %s(entry.getKey());\n" marshall_fn ; - fprintf file " var value = %s(entry.getValue());\n" - marshall_fn' ; - fprintf file " result.put(key, value);\n" ; - fprintf file " }\n" ; - fprintf file " return 
result;\n" + {|var map = (Map)object; + var result = new HashMap<|} + ^ ty_name + ^ {|,|} + ^ ty_name' + ^ {|>(); + for(var entry: map.entrySet()) { + var key = |} + ^ marshall_fn + ^ {|(entry.getKey()); + var value = |} + ^ marshall_fn' + ^ {|(entry.getValue()); + result.put(key, value); + } + return result;|} | Record ty -> let contents = Hashtbl.find records ty in let cls_name = class_case ty in - fprintf file - " Map map = (Map) object;\n" ; - fprintf file " %s.Record record = new %s.Record();\n" cls_name - cls_name ; - List.iter (gen_marshall_record_contents file []) contents ; + {|Map map = (Map) object;|} + ^ cls_name + ^ {|.Record record = new |} + ^ cls_name + ^ {| .Record(); |} + ^ String.concat "" (List.map (gen_marshall_record_contents []) contents) + ^ (*Event.Record needs a special case to handle snapshots*) - if ty = "event" then generate_snapshot_hack file ; - fprintf file " return record;\n" - | Option ty -> - gen_marshall_body file ty - -let rec gen_marshall_func file ty = - match ty with - | Option x -> - if TypeSet.mem x !types then - () - else - gen_marshall_func file ty - | _ -> - let type_string = get_java_type ty in - fprintf file - {| /** - * Converts an {@link Object} to a {@link %s} object. - *
- * This method takes an {@link Object} as input and attempts to convert it into a {@link %s} object. - * If the input object is null, the method returns null. Otherwise, it creates a new {@link %s} - * object using the input object's {@link String} representation. - *
- * @param object The {@link Object} to be converted to a {@link %s} object. - * @return A {@link %s} object created from the input {@link Object}'s {@link String} representation, - * or null if the input object is null. - * @deprecated this method will not be publicly exposed in future releases of this package. - */ - @Deprecated -|} - type_string type_string type_string type_string type_string ; - let fn_name = get_marshall_function ty in - - if match ty with Map _ | Record _ -> true | _ -> false then - fprintf file " @SuppressWarnings(\"unchecked\")\n" ; - - fprintf file " public static %s %s(Object object) {\n" type_string - fn_name ; - fprintf file " if (object == null) {\n" ; - fprintf file " return null;\n" ; - fprintf file " }\n" ; - gen_marshall_body file ty ; - fprintf file " }\n\n" -(***) - -let gen_enum file name ls = - let name = class_case name in - let ls = - ("UNRECOGNIZED", "The value does not belong to this enumeration") :: ls - in - fprintf file " public enum %s {\n" name ; - let to_member_declaration (name, description) = - let escaped_description = - global_replace (regexp_string "*/") "* /" description - in - let final_description = - global_replace (regexp_string "\n") "\n * " escaped_description - in - let comment = - String.concat "\n" - [" /**"; " * " ^ final_description; " */"] - in - let json_property = - if name != "UNRECOGNIZED" then - {|@JsonProperty("|} ^ name ^ {|")|} + if ty = "event" then + generate_snapshot_hack else - "@JsonEnumDefaultValue" - in - comment ^ "\n " ^ json_property ^ "\n " ^ enum_of_wire name - in - fprintf file "%s" (String.concat ",\n" (List.map to_member_declaration ls)) ; - fprintf file ";\n" ; - fprintf file " public String toString() {\n" ; - List.iter - (fun (enum, _) -> - fprintf file " if (this == %s) return \"%s\";\n" - (enum_of_wire enum) enum - ) - ls ; - fprintf file " /* This can never be reached */\n" ; - fprintf file " return \"UNRECOGNIZED\";\n" ; - fprintf file " }\n" ; - fprintf file "\n }\n\n" 
- -let gen_enums file = Hashtbl.iter (gen_enum file) enums + " return record;" + | Option ty -> + gen_marshall_body ty let gen_error_field_name field = camel_case (String.concat "_" (Astring.String.cuts ~sep:" " field)) @@ -1105,12 +1005,107 @@ let populate_releases templdir class_dir = ("APIVersion.mustache", "APIVersion.java") json_releases templdir class_dir +let populate_types templdir class_dir = + let list_errors = + Hashtbl.fold (fun k v acc -> (k, v) :: acc) Datamodel.errors [] + in + let errors = + List.map + (fun (_, error) -> + let class_name = exception_class_case error.err_name in + let err_params = + List.mapi + (fun index value -> + `O + [ + ("name", `String (gen_error_field_name value)) + ; ("index", `Float (Int.to_float index)) + ; ("last", `Bool (index == List.length error.err_params - 1)) + ] + ) + error.err_params + in + `O + [ + ("description", `String (escape_xml error.err_doc)) + ; ("class_name", `String class_name) + ; ("err_params", `A err_params) + ] + ) + list_errors + in + let list_enums = Hashtbl.fold (fun k v acc -> (k, v) :: acc) enums [] in + let enums = + List.map + (fun (enum_name, enum_values) -> + let class_name = class_case enum_name in + let mapped_values = + List.map + (fun (name, description) -> + let escaped_description = + global_replace (regexp_string "*/") "* /" description + in + let final_description = + global_replace (regexp_string "\n") "\n * " + escaped_description + in + `O + [ + ("name", `String name) + ; ("name_uppercase", `String (enum_of_wire name)) + ; ("description", `String final_description) + ] + ) + enum_values + in + `O [("class_name", `String class_name); ("values", `A mapped_values)] + ) + list_enums + in + let list_types = TypeSet.fold (fun t acc -> t :: acc) !types [] in + let types = + List.map + (fun t -> + let type_string = get_java_type t in + let class_name = class_case type_string in + let method_name = get_marshall_function t in + (*Every type which may be returned by a function may also be 
the result of the*) + (* corresponding asynchronous task. We therefore need to generate corresponding*) + (* marshalling functions which can take the raw xml of the tasks result field*) + (* and turn it into the corresponding type. Luckily, the only things returned by*) + (* asynchronous tasks are object references and strings, so rather than implementing*) + (* the general recursive structure we'll just make one for each of the classes*) + (* that's been registered as a marshall-needing type*) + let generate_reference_task_result_func = + match t with Ref _ -> true | _ -> false + in + `O + [ + ("name", `String type_string) + ; ("class_name", `String class_name) + ; ("method_name", `String method_name) + ; ( "suppress_unchecked_warning" + , `Bool (match t with Map _ | Record _ -> true | _ -> false) + ) + ; ( "generate_reference_task_result_func" + , `Bool generate_reference_task_result_func + ) + ; ("method_body", `String (gen_marshall_body t)) + ] + ) + list_types + in + let json = + `O [("errors", `A errors); ("enums", `A enums); ("types", `A types)] + in + render_file ("Types.mustache", "Types.java") json templdir class_dir + let _ = let templdir = "templates" in let class_dir = "autogen/xen-api/src/main/java/com/xensource/xenapi" in List.iter (fun x -> gen_class x class_dir) classes ; - gen_types_class class_dir ; populate_releases templdir class_dir ; + populate_types templdir class_dir ; let uncommented_license = string_of_file "LICENSE" in let class_license = open_out "autogen/xen-api/src/main/resources/LICENSE" in diff --git a/ocaml/sdk-gen/java/templates/Types.mustache b/ocaml/sdk-gen/java/templates/Types.mustache new file mode 100644 index 00000000000..6af5336a3d0 --- /dev/null +++ b/ocaml/sdk-gen/java/templates/Types.mustache @@ -0,0 +1,256 @@ +/* + * Copyright (c) Cloud Software Group, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1) Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package com.xensource.xenapi; +import java.util.*; +import com.fasterxml.jackson.annotation.JsonEnumDefaultValue; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * This class holds enum types and exceptions. 
+ */ +public class Types +{ + /** + * Interface for all Record classes + */ + public interface Record + { + /** + * Convert a Record to a Map + */ + Map toMap(); + } + /** + * Base class for all XenAPI Exceptions + */ + public static class XenAPIException extends IOException { + public final String shortDescription; + public final String[] errorDescription; + XenAPIException(String shortDescription) + { + this.shortDescription = shortDescription; + this.errorDescription = null; + } + XenAPIException(String[] errorDescription) + { + this.errorDescription = errorDescription; + if (errorDescription.length > 0) + { + shortDescription = errorDescription[0]; + } else + { + shortDescription = ""; + } + } + public String toString() + { + if (errorDescription == null) + { + return shortDescription; + } else if (errorDescription.length == 0) + { + return ""; + } + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < errorDescription.length - 1; i++) + { + sb.append(errorDescription[i]); + } + sb.append(errorDescription[errorDescription.length - 1]); + return sb.toString(); + } + } + + /** + * Thrown if the response from the server contains an invalid status. + */ + public static class BadServerResponse extends XenAPIException + { + public BadServerResponse(JsonRpcResponseError responseError) + { + super(String.valueOf(responseError)); + } + } + + /** + * Checks the provided server response was successful. If the call + * failed, throws a XenAPIException. If the server + * returned an invalid response, throws a BadServerResponse. + * Otherwise, returns the server response as passed in. + */ + public static void checkError(JsonRpcResponseError response) throws XenAPIException, BadServerResponse + { + var errorData = response.data; + if(errorData.length == 0){ + throw new BadServerResponse(response); + } + var errorName = response.message; + + {{#errors}} + if (errorName.equals("{{class_name}}")){ + {{#err_params}} + String p{{index}} = errorData.length > {{index}} ? 
errorData[{{index}}] : ""; + {{/err_params}} + throw new Types.{{class_name}}({{#err_params}}p{{index}}{{^last}}, {{/last}}{{/err_params}}); + } + + {{/errors}} + + // An unknown error occurred + throw new Types.XenAPIException(errorData); + } + + {{#enums}} + public enum {{class_name}} { + /** + * The value does not belong to this enumeration + */ + @JsonEnumDefaultValue + UNRECOGNIZED, + {{#values}} + /** + * {{description}} + */ + @JsonProperty("{{name}}") + {{name_uppercase}}; + {{/values}} + + public String toString() { + if (this == UNRECOGNIZED) return "UNRECOGNIZED"; + {{#values}} + if (this == {{name_uppercase}}) return "{{name}}"; + {{/values}} + + /* This can never be reached */ + return "illegal enum"; + } + } + + {{/enums}} + + {{#errors}} + /** + * {{description}} + */ + public static class {{class_name}} extends XenAPIException { + public final String proxies; + + /** + * Create a new {{class_name}} + */ + public {{class_name}}({{#err_params}}String {{name}}{{^last}}, {{/last}}{{/err_params}}) { + super("The PVS site contains running proxies.");# + {{#err_params}} + this.{{name}} = {{name}}; + {{/err_params}} + } + } + + {{/errors}} + + {{#types}} + /** + * Converts an {@link Object} to a {@link {{{name}}}} object. + *
+ * This method takes an {@link Object} as input and attempts to convert it into a {@link {{{name}}}} object.
+ * If the input object is null, the method returns null. Otherwise, it creates a new {@link {{{name}}}}
+ * object using the input object's {@link String} representation.
+ *
+ * @param object The {@link Object} to be converted to a {@link name} object. + * @return A {@link name} object created from the input {@link Object}'s {@link String} representation, + * or null if the input object is null. + * @deprecated this method will not be publicly exposed in future releases of this package. + */ + @Deprecated{{#suppress_unchecked_warning}} + @SuppressWarnings("unchecked"){{/suppress_unchecked_warning}} + public static {{{name}}} {{method_name}}(Object object) { + if(object == null){ + return null; + } + {{{method_body}}} + } + + {{/types}} + + {{#types}}{{#generate_reference_task_result_func}} + /** + * Attempt to convert the {@link Task}'s result to a {@link {{{name}}}} object. + * Will return null if the method cannot fetch a valid value from the {@link Task} object. + * @param task The task from which to fetch the result. + * @param connection The connection + * @return the instantiated object if a valid value was found, null otherwise. + * @throws BadServerResponse Thrown if the response from the server contains an invalid status. + * @throws XenAPIException if the call failed. + * @throws IOException if an error occurs during a send or receive. This includes cases where a payload is invalid JSON. 
+ */ + public static {{class_name}} to{{class_name}}(Task task, Connection connection) throws IOException { + return Types.to{{class_name}}(parseResult(task.getResult(connection))); + } + + {{/generate_reference_task_result_func}} + {{/types}} + + public static class BadAsyncResult extends XenAPIException + { + public final String result; + + public BadAsyncResult(String result) + { + super(result); + this.result = result; + } + } + + private static String parseResult(String result) throws BadAsyncResult + { + Pattern pattern = Pattern.compile("(.*)"); + Matcher matcher = pattern.matcher(result); + if (!matcher.find() || matcher.groupCount() != 1) { + throw new Types.BadAsyncResult("Can't interpret: " + result); + } + + return matcher.group(1); + } + + public static EventBatch toEventBatch(Object object) { + if (object == null) { + return null; + } + Map map = (Map) object; + EventBatch batch = new EventBatch(); + batch.token = toString(map.get("token")); + batch.validRefCounts = map.get("valid_ref_counts"); + batch.events = toSetOfEventRecord(map.get("events")); + return batch; + } +} \ No newline at end of file From b175c0dd2e6ec492426d5fd65be9bc5f9797877b Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Wed, 24 Apr 2024 15:17:30 +0100 Subject: [PATCH 220/341] Use templates to generate Java classes Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/main.ml | 676 +++++++------------- ocaml/sdk-gen/java/templates/Class.mustache | 180 ++++++ 2 files changed, 427 insertions(+), 429 deletions(-) create mode 100644 ocaml/sdk-gen/java/templates/Class.mustache diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index 8e7777bcae0..eaae4d2c1fb 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -33,10 +33,6 @@ let api = (*Here we extract a list of objs (look in datamodel_types.ml for the structure definitions)*) let classes = objects_of_api api -let print_license file = - output_string file Licence.bsd_two_clause ; - 
output_string file "\n\n" - (*How shall we translate datamodel identifiers into Java, with its conventions about case, and reserved words?*) let reserved_words = function @@ -184,27 +180,14 @@ let rec get_marshall_function_rec = function | Option ty -> get_marshall_function_rec ty -(*get_marshall_function (Set(Map(Float,Bool)));; -> "toSetOfMapOfDoubleBoolean"*) let get_marshall_function ty = "to" ^ get_marshall_function_rec ty -let _ = get_java_type switch_enum - -(* Generate the methods *) - let get_java_type_or_void = function | None -> "void" | Some (ty, _) -> get_java_type ty -(* Here are a lot of functions which ask questions of the messages associated with*) -(* objects, the answers to which are helpful when generating the corresponding java*) -(* functions. For instance is_method_static takes an object's message, and*) -(* determines whether it should be static or not in java, by looking at whether*) -(* it has a self parameter or not.*) - -(*Similar functions for deprecation of methods*) - let get_method_deprecated_release_name message = match message.msg_release.internal_deprecated_since with | Some version -> @@ -212,262 +195,6 @@ let get_method_deprecated_release_name message = | None -> None -let get_method_deprecated_annotation message = - match get_method_deprecated_release_name message with - | Some version -> - {|@Deprecated(since = "|} ^ version ^ {|")|} - | None -> - "" - -let get_method_param {param_type= ty; param_name= name; _} = - let ty = get_java_type ty in - let name = camel_case name in - sprintf "%s %s" ty name - -let get_method_params_for_signature params = - String.concat ", " ("Connection c" :: List.map get_method_param params) - -let get_method_params_for_xml message params = - let f = function - | {param_type= Record _; param_name= name; _} -> - camel_case name ^ "_map" - | {param_name= name; _} -> - camel_case name - in - match params with - | [] -> - if is_method_static message then - [] - else - ["this.ref"] - | _ -> - if 
is_method_static message then - List.map f params - else - "this.ref" :: List.map f params - -(* Here is the main method generating function.*) -let gen_method file cls message params async_version = - let return_type = - if - String.lowercase_ascii cls.name = "event" - && String.lowercase_ascii message.msg_name = "from" - then - "EventBatch" - else - get_java_type_or_void message.msg_result - in - let method_static = if is_method_static message then "static " else "" in - let method_name = camel_case message.msg_name in - let paramString = get_method_params_for_signature params in - let default_errors = - [ - ( "BadServerResponse" - , "Thrown if the response from the server contains an invalid status." - ) - ; ("XenAPIException", "if the call failed.") - ; ( "IOException" - , "if an error occurs during a send or receive. This includes cases \ - where a payload is invalid JSON." - ) - ] - in - let publishInfo = get_published_info_message message cls in - - fprintf file " /**\n" ; - fprintf file " * %s\n" (escape_xml message.msg_doc) ; - fprintf file " * Minimum allowed role: %s\n" - (get_minimum_allowed_role message) ; - if not (publishInfo = "") then fprintf file " * %s\n" publishInfo ; - let deprecated_info = - match get_method_deprecated_release_name message with - | Some version -> - " * @deprecated since " ^ version ^ "\n" - | None -> - "" - in - fprintf file "%s" deprecated_info ; - fprintf file " *\n" ; - fprintf file " * @param c The connection the call is made on\n" ; - - List.iter - (fun x -> - let paramPublishInfo = get_published_info_param message x in - fprintf file " * @param %s %s%s\n" (camel_case x.param_name) - (if x.param_doc = "" then "No description" else escape_xml x.param_doc) - (if paramPublishInfo = "" then "" else " " ^ paramPublishInfo) - ) - params ; - - ( if async_version then - fprintf file " * @return Task\n" - else - match message.msg_result with - | None -> - () - | Some (_, "") -> - fprintf file " * @return %s\n" - 
(get_java_type_or_void message.msg_result) - | Some (_, desc) -> - fprintf file " * @return %s\n" desc - ) ; - - List.iter - (fun x -> fprintf file " * @throws %s %s\n" (fst x) (snd x)) - default_errors ; - List.iter - (fun x -> - fprintf file " * @throws Types.%s %s\n" - (exception_class_case x.err_name) - x.err_doc - ) - message.msg_errors ; - - fprintf file " */\n" ; - - let deprecated_string = - match get_method_deprecated_annotation message with - | "" -> - "" - | other -> - " " ^ other ^ "\n" - in - if async_version then - fprintf file "%s public %sTask %sAsync(%s) throws\n" deprecated_string - method_static method_name paramString - else - fprintf file "%s public %s%s %s(%s) throws\n" deprecated_string - method_static return_type method_name paramString ; - - let all_errors = - List.map fst default_errors - @ List.map - (fun x -> "Types." ^ exception_class_case x.err_name) - message.msg_errors - in - fprintf file " %s {\n" (String.concat ",\n " all_errors) ; - - if async_version then - fprintf file " String methodCall = \"Async.%s.%s\";\n" - message.msg_obj_name message.msg_name - else - fprintf file " String methodCall = \"%s.%s\";\n" message.msg_obj_name - message.msg_name ; - - if message.msg_session then - fprintf file " String sessionReference = c.getSessionReference();\n" - else - () ; - - let record_params = - List.filter - (function {param_type= Record _; _} -> true | _ -> false) - message.msg_params - in - - List.iter - (fun {param_name= s; _} -> - let name = camel_case s in - fprintf file " var %s_map = %s.toMap();\n" name name - ) - record_params ; - - fprintf file " Object[] methodParameters = {" ; - - let methodParamsList = - if message.msg_session then - "sessionReference" :: get_method_params_for_xml message params - else - get_method_params_for_xml message params - in - - output_string file (String.concat ", " methodParamsList) ; - - fprintf file "};\n" ; - - if message.msg_result != None || async_version then - fprintf file " var 
typeReference = new TypeReference<%s>(){};\n" - (if async_version then "Task" else return_type) ; - - let last_statement = - match message.msg_result with - | None when not async_version -> - " c.dispatch(methodCall, methodParameters);\n" - | _ -> - " return c.dispatch(methodCall, methodParameters, typeReference);\n" - in - fprintf file "%s" last_statement ; - - fprintf file " }\n\n" - -(*Some methods have an almost identical asynchronous counterpart, which returns*) -(* a Task reference rather than its usual return value*) -let gen_method_and_asynchronous_counterpart file cls message = - let generator x = - if message.msg_async then gen_method file cls message x true ; - gen_method file cls message x false - in - match message.msg_params with - | [] -> - generator [] - | _ -> - let paramGroups = gen_param_groups message message.msg_params in - List.iter generator paramGroups - -(* Generate the record *) - -(* The fields of an object are stored in trees in the datamodel, which means that*) -(* the next three functions, which are conceptually for generating the fields*) -(* of each class, and for the corresponding entries in the toString and toMap*) -(* functions are in fact implemented as three sets of three mutual recursions,*) -(* which take the trees apart. 
*) - -let gen_record_field file prefix field cls = - let ty = get_java_type field.ty in - let full_name = String.concat "_" (List.rev (field.field_name :: prefix)) in - let name = camel_case full_name in - let publishInfo = get_published_info_field field cls in - fprintf file " /**\n" ; - fprintf file " * %s\n" (escape_xml field.field_description) ; - if not (publishInfo = "") then fprintf file " * %s\n" publishInfo ; - fprintf file " */\n" ; - fprintf file " @JsonProperty(\"%s\")\n" full_name ; - - if field.lifecycle.state = Lifecycle.Deprecated_s then - fprintf file " @Deprecated(since = \"%s\")\n" - (get_release_branding (get_deprecated_release field.lifecycle.transitions)) ; - - fprintf file " public %s %s;\n\n" ty name - -let rec gen_record_namespace file prefix (name, contents) cls = - List.iter (gen_record_contents file (name :: prefix) cls) contents - -and gen_record_contents file prefix cls = function - | Field f -> - gen_record_field file prefix f cls - | Namespace (n, cs) -> - gen_record_namespace file prefix (n, cs) cls - -(***) - -let gen_record_tostring_field file prefix field = - let name = String.concat "_" (List.rev (field.field_name :: prefix)) in - let name = camel_case name in - fprintf file - " print.printf(\"%%1$20s: %%2$s\\n\", \"%s\", this.%s);\n" name - name - -let rec gen_record_tostring_namespace file prefix (name, contents) = - List.iter (gen_record_tostring_contents file (name :: prefix)) contents - -and gen_record_tostring_contents file prefix = function - | Field f -> - gen_record_tostring_field file prefix f - | Namespace (n, cs) -> - gen_record_tostring_namespace file prefix (n, cs) - -(***) - let field_default = function | SecretString | String -> {|""|} @@ -494,166 +221,11 @@ let field_default = function | Option _ -> "null" -let gen_record_tomap_field file prefix field = - let name = String.concat "_" (List.rev (field.field_name :: prefix)) in - let name' = camel_case name in - let default = field_default field.ty in - fprintf 
file " map.put(\"%s\", this.%s == null ? %s : this.%s);\n" - name name' default name' - -let rec gen_record_tomap_contents file prefix = function - | Field f -> - gen_record_tomap_field file prefix f - | Namespace (n, cs) -> - List.iter (gen_record_tomap_contents file (n :: prefix)) cs - -(*Generate the Record subclass for the given class, with its toString and toMap*) -(* methods. We're also modifying the records hash table as a side effect*) - -let gen_record file cls = - let class_name = class_case cls.name in - let _ = Hashtbl.replace records cls.name cls.contents in - let contents = cls.contents in - fprintf file " /**\n" ; - fprintf file " * Represents all the fields in a %s\n" class_name ; - fprintf file " */\n" ; - fprintf file " public static class Record implements Types.Record {\n" ; - fprintf file " public String toString() {\n" ; - fprintf file " StringWriter writer = new StringWriter();\n" ; - fprintf file " PrintWriter print = new PrintWriter(writer);\n" ; - - List.iter (gen_record_tostring_contents file []) contents ; - (*for the Event.Record, we have to add in the snapshot field by hand, because it's not in the data model!*) - if cls.name = "event" then - fprintf file - " print.printf(\"%%1$20s: %%2$s\\n\", \"snapshot\", \ - this.snapshot);\n" ; - - fprintf file " return writer.toString();\n" ; - fprintf file " }\n\n" ; - fprintf file " /**\n" ; - fprintf file " * Convert a %s.Record to a Map\n" cls.name ; - fprintf file " */\n" ; - fprintf file " public Map toMap() {\n" ; - fprintf file " var map = new HashMap();\n" ; - - List.iter (gen_record_tomap_contents file []) contents ; - if cls.name = "event" then - fprintf file " map.put(\"snapshot\", this.snapshot);\n" ; - - fprintf file " return map;\n" ; - fprintf file " }\n\n" ; - - List.iter (gen_record_contents file [] cls) contents ; - if cls.name = "event" then ( - fprintf file " /**\n" ; - fprintf file - " * The record of the database object that was added, changed or \ - deleted\n" ; - fprintf 
file - " * (the actual type will be VM.Record, VBD.Record or similar)\n" ; - fprintf file " */\n" ; - fprintf file " public Object snapshot;\n" - ) ; - - fprintf file " }\n\n" - (* Generate the class *) let class_is_empty cls = cls.contents = [] -let gen_class cls folder = - let class_name = class_case cls.name in - let methods = cls.messages in - let file = open_out (Filename.concat folder class_name ^ ".java") in - let publishInfo = get_published_info_class cls in - print_license file ; - fprintf file - {|package com.xensource.xenapi; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonValue; -import com.fasterxml.jackson.core.type.TypeReference; -import com.xensource.xenapi.Types.BadServerResponse; -import com.xensource.xenapi.Types.XenAPIException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.util.*; -import java.io.IOException; -|} ; - fprintf file "/**\n" ; - fprintf file " * %s\n" cls.description ; - if not (publishInfo = "") then fprintf file " * %s\n" publishInfo ; - fprintf file " *\n" ; - fprintf file " * @author Cloud Software Group, Inc.\n" ; - fprintf file " */\n" ; - fprintf file "public class %s extends XenAPIObject {\n\n" class_name ; - - if class_is_empty cls then - fprintf file - " @JsonValue\n\ - \ public String toWireString() {\n\ - \ return null;\n\ - \ }\n\n" - else ( - fprintf file " /**\n" ; - fprintf file " * The XenAPI reference (OpaqueRef) to this object.\n" ; - fprintf file " */\n" ; - fprintf file " protected final String ref;\n\n" ; - fprintf file " /**\n" ; - fprintf file " * For internal use only.\n" ; - fprintf file " */\n" ; - fprintf file " %s(String ref) {\n" class_name ; - fprintf file " this.ref = ref;\n" ; - fprintf file " }\n\n" ; - fprintf file " /**\n" ; - fprintf file - " * @return The XenAPI reference (OpaqueRef) to this object.\n" ; - fprintf file " */\n" ; - fprintf file " @JsonValue\n" ; - fprintf file " public String toWireString() {\n" ; - 
fprintf file " return this.ref;\n" ; - fprintf file " }\n\n" - ) ; - - if not (class_is_empty cls) then ( - fprintf file " /**\n" ; - fprintf file - " * If obj is a %s, compares XenAPI references for equality.\n" - class_name ; - fprintf file " */\n" ; - fprintf file " @Override\n" ; - fprintf file " public boolean equals(Object obj)\n" ; - fprintf file " {\n" ; - fprintf file " if (obj instanceof %s)\n" class_name ; - fprintf file " {\n" ; - fprintf file " %s other = (%s) obj;\n" class_name class_name ; - fprintf file " return other.ref.equals(this.ref);\n" ; - fprintf file " } else\n" ; - fprintf file " {\n" ; - fprintf file " return false;\n" ; - fprintf file " }\n" ; - fprintf file " }\n\n" ; - - (*hashcode*) - fprintf file " @Override\n" ; - fprintf file " public int hashCode()\n" ; - fprintf file " {\n" ; - fprintf file " return ref.hashCode();\n" ; - fprintf file " }\n\n" ; - flush file ; - gen_record file cls ; - flush file - ) ; - - List.iter (gen_method_and_asynchronous_counterpart file cls) methods ; - - flush file ; - fprintf file "}" ; - close_out file - -(**?*) -(* Generate Marshalling Class *) (*This generates the special case code for marshalling the snapshot field in an Event.Record*) @@ -1100,12 +672,258 @@ let populate_types templdir class_dir = in render_file ("Types.mustache", "Types.java") json templdir class_dir +let get_message_object cls message async_version params = + let is_method_async = async_version in + let return_type = + if is_method_async then + "Task" + else if + String.lowercase_ascii cls.name = "event" + && String.lowercase_ascii message.msg_name = "from" + then + "EventBatch" + else + get_java_type_or_void message.msg_result + in + let return_description = + match message.msg_result with + | None -> + get_java_type_or_void message.msg_result + | Some (_, description) -> + description + in + let returns_void = message.msg_result = None && not async_version in + let record_parameters = + List.map + (fun parameter -> + `O 
[("name_camel", `String (camel_case parameter.param_name))] + ) + (List.filter + (function {param_type= Record _; _} -> true | _ -> false) + message.msg_params + ) + in + let is_deprecated = + match message.msg_release.internal_deprecated_since with + | Some _ -> + true + | None -> + false + in + let deprecated_release = + match get_method_deprecated_release_name message with + | Some v -> + get_release_branding v + | None -> + "" + in + let type_reference = + if is_method_async then + "Task" + else if message.msg_result != None then + return_type + else + "" + in + let parameters = + List.map + (fun parameter -> + let publish_info = get_published_info_param message parameter in + let name_camel = camel_case parameter.param_name in + let description = escape_xml parameter.param_doc in + `O + [ + ("type", `String (get_java_type parameter.param_type)) + ; ( "is_record" + , `Bool + (match parameter.param_type with Record _ -> true | _ -> false) + ) + ; ("name_camel", `String name_camel) + ; ("description", `String description) + ; ("publish_info", `String publish_info) + ] + ) + params + in + let error_definitions = + List.map + (fun error -> + let exception_name = exception_class_case error.err_name in + ("Types." 
^ exception_name, escape_xml error.err_doc) + ) + message.msg_errors + in + let errors = + List.map + (fun (name, description) -> + `O [("name", `String name); ("description", `String description)] + ) + error_definitions + in + let is_static = is_method_static message in + let session_parameter = + `O + [ + ("type", `String "String") + ; ("is_record", `Bool false) + ; ("name_camel", `String "sessionReference") + ; ("description", `String "") + ; ("publish_info", `String "") + ] + in + let non_static_reference_parameter = + `O + [ + ("type", `String "String") + ; ("is_record", `Bool false) + ; ("name_camel", `String "this.ref") + ; ("description", `String "") + ; ("publish_info", `String "") + ] + in + let extra_method_parameters = + match (message.msg_session, is_static) with + | true, true -> + [session_parameter] + | true, false -> + [session_parameter; non_static_reference_parameter] + | false, true -> + [] + | false, false -> + [non_static_reference_parameter] + in + let rec set_is_last params acc = + match params with + | [] -> + [] + | `O last :: [] -> + `O (("is_last", `Bool true) :: last) :: acc + | `O h :: tail -> + `O (("is_last", `Bool false) :: h) :: set_is_last tail acc + in + let method_parameters = + set_is_last (extra_method_parameters @ parameters) [] + in + `O + [ + ("return_type", `String return_type) + ; ("is_async", `Bool async_version) + ; ("return_description", `String return_description) + ; ("returns_void", `Bool returns_void) + ; ("is_static", `Bool is_static) + ; ("name_camel", `String (camel_case message.msg_name)) + ; ("name", `String message.msg_name) + ; ("publish_info", `String (get_published_info_message message cls)) + ; ("description", `String (escape_xml message.msg_doc)) + ; ("minimum_allowed_role", `String (get_minimum_allowed_role message)) + ; ("object_name", `String message.msg_obj_name) + ; ("supports_session", `Bool message.msg_session) + ; ("record_parameters", `A record_parameters) + ; ("is_deprecated", `Bool 
is_deprecated) + ; ("deprecated_release", `String deprecated_release) + ; ("type_reference", `String type_reference) + ; ("parameters", `A parameters) + ; ("method_parameters", `A method_parameters) + ; ("errors", `A errors) + ] + +let populate_class cls templdir class_dir = + Hashtbl.replace records cls.name cls.contents ; + let class_name = class_case cls.name in + let rec content_fields content namespace_name = + match content with + | Field f -> + let name_with_prefix = + if namespace_name == "" then + f.field_name + else + namespace_name ^ "_" ^ f.field_name + in + let name_camel = camel_case name_with_prefix in + let ty = get_java_type f.ty in + let publish_info = get_published_info_field f cls in + let description = escape_xml f.field_description in + let is_deprecated = f.lifecycle.state = Lifecycle.Deprecated_s in + let deprecated_release = + if is_deprecated then + get_release_branding (get_deprecated_release f.lifecycle.transitions) + else + "" + in + [ + `O + [ + ("name", `String name_with_prefix) + ; ("name_camel", `String name_camel) + ; ("default_value", `String (field_default f.ty)) + ; ("description", `String description) + ; ("type", `String ty) + ; ("publish_info", `String publish_info) + ; ("is_deprecated", `Bool is_deprecated) + ; ("deprecated_release", `String deprecated_release) + ] + ] + | Namespace (name, contents) -> + List.flatten (List.map (fun c -> content_fields c name) contents) + in + let fields = + List.flatten (List.map (fun c -> content_fields c "") cls.contents) + in + let rec get_async_and_sync_methods methods acc = + match methods with + | [] -> + acc + | h :: tail -> + let get_variants messages = + (* we get the param groups outside of the mapping because we know it's always the same message *) + let params = gen_param_groups h h.msg_params in + match params with + | [] -> + List.map + (fun (message, is_async) -> (message, is_async, [])) + messages + | _ -> + List.map + (fun (message, is_async) -> + List.map (fun param -> 
(message, is_async, param)) params + ) + messages + |> List.flatten + in + if h.msg_async then + get_variants [(h, true); (h, false)] + @ get_async_and_sync_methods tail acc + else + get_variants [(h, false)] @ get_async_and_sync_methods tail acc + in + let async_and_sync_methods = get_async_and_sync_methods cls.messages [] in + let methods = + List.map + (fun (message, async_version, params) -> + get_message_object cls message async_version params + ) + async_and_sync_methods + in + let json = + `O + [ + ("class_name", `String class_name) + ; ("description", `String cls.description) + ; ("publish_info", `String (get_published_info_class cls)) + ; ("is_empty_class", `Bool (class_is_empty cls)) + ; ("is_event_class", `Bool (cls.name = "event")) + ; ("fields", `A fields) + ; ("methods", `A methods) + ] + in + render_file ("Class.mustache", class_name ^ ".java") json templdir class_dir + let _ = let templdir = "templates" in let class_dir = "autogen/xen-api/src/main/java/com/xensource/xenapi" in - List.iter (fun x -> gen_class x class_dir) classes ; populate_releases templdir class_dir ; populate_types templdir class_dir ; + List.iter (fun cls -> populate_class cls templdir class_dir) classes ; let uncommented_license = string_of_file "LICENSE" in let class_license = open_out "autogen/xen-api/src/main/resources/LICENSE" in diff --git a/ocaml/sdk-gen/java/templates/Class.mustache b/ocaml/sdk-gen/java/templates/Class.mustache new file mode 100644 index 00000000000..986a1fbee79 --- /dev/null +++ b/ocaml/sdk-gen/java/templates/Class.mustache @@ -0,0 +1,180 @@ +/* + * Copyright (c) Cloud Software Group, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1) Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * 2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +package com.xensource.xenapi; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import com.fasterxml.jackson.core.type.TypeReference; +import com.xensource.xenapi.Types.BadServerResponse; +import com.xensource.xenapi.Types.XenAPIException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.*; +import java.io.IOException; + +/** + * {{description}}{{#publish_info}} + * {{{publish_info}}}{{/publish_info}} + * + * @author Cloud Software Group, Inc. + */ +public class {{class_name}} extends XenAPIObject { + + {{#is_empty_class}} + @JsonValue + public String toWireString() { + return null; + } + + {{/is_empty_class}} + {{^is_empty_class}} + /** + * The XenAPI reference (OpaqueRef) to this object. + */ + protected final String ref; + + /** + * For internal use only. 
+ */
+ {{{class_name}}}(String ref) {
+ this.ref = ref;
+ }
+
+ /**
+ * @return The XenAPI reference (OpaqueRef) to this object.
+ */
+ @JsonValue
+ public String toWireString() {
+ return this.ref;
+ }
+
+ /**
+ * If obj is a {{{class_name}}}, compares XenAPI references for equality.
+ */
+ @Override
+ public boolean equals(Object obj)
+ {
+ if (obj instanceof {{{class_name}}})
+ {
+ {{{class_name}}} other = ({{{class_name}}}) obj;
+ return other.ref.equals(this.ref);
+ } else
+ {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return ref.hashCode();
+ }
+
+ /**
+ * Represents all the fields in a {{{class_name}}}
+ */
+ public static class Record implements Types.Record {
+ public String toString() {
+ StringWriter writer = new StringWriter();
+ PrintWriter print = new PrintWriter(writer);
+ {{#fields}}
+ print.printf("%1$20s: %2$s\n", "{{{name_camel}}}", this.{{{name_camel}}});
+ {{/fields}}
+ {{#is_event_class}}
+ print.printf("%1$20s: %2$s\n", "snapshot", this.snapshot);
+ {{/is_event_class}}
+ return writer.toString();
+ }
+
+ /**
+ * Convert a {{{class_name}}}.Record to a Map
+ */
+ public Map toMap() {
+ var map = new HashMap();
+ {{#fields}}
+ map.put("{{{name}}}", this.{{{name_camel}}} == null ? {{{default_value}}} : this.{{{name_camel}}});
+ {{/fields}}
+ {{#is_event_class}}
+ map.put("snapshot", this.snapshot);
+ {{/is_event_class}}
+ return map;
+ }
+
+ {{#fields}}
+ /**
+ * {{{description}}}{{#publish_info}}
+ * {{{publish_info}}}{{/publish_info}}
+ */
+ @JsonProperty("{{{name}}}"){{#is_deprecated}}
+ @Deprecated(since = "{{{deprecated_release}}}"){{/is_deprecated}}
+ public {{{type}}} {{{name_camel}}};
+
+ {{/fields}}
+ {{#is_event_class}}
+ /**
+ * The record of the database object that was added, changed or deleted.
+ * The actual type will be VM.Record, VBD.Record, or similar.
+ */ + public Object snapshot; + {{/is_event_class}} + } + + {{/is_empty_class}} + {{#methods}} + /** + * {{{description}}} + * Minimum allowed role: {{{minimum_allowed_role}}} + * {{{publish_info}}}{{#is_deprecated}} + * @deprecated since {{{deprecated_release}}}{{/is_deprecated}} + * + * @param c The connection the call is made on{{#parameters}} + * @param {{{name_camel}}} {{^description}}No description{{/description}}{{#description}}{{{.}}}{{/description}} {{{publish_info}}}{{/parameters}}{{^returns_void}} + * @return {{#is_async}}Task{{/is_async}}{{^is_async}}{{{return_description}}}{{/is_async}}{{/returns_void}} + * @throws BadServerResponse Thrown if the response from the server contains an invalid status. + * @throws XenAPIException if the call failed. + * @throws IOException if an error occurs during a send or receive. This includes cases where a payload is invalid JSON.{{#errors}} + * @throws {{{name}}} {{{description}}}{{/errors}} + */{{#is_deprecated}} + @Deprecated(since = "{{{deprecated_release}}}"){{/is_deprecated}} + public{{#is_static}} static{{/is_static}} {{#is_async}}Task{{/is_async}}{{^is_async}}{{{return_type}}}{{/is_async}} {{name_camel}}{{#is_async}}Async{{/is_async}}(Connection c{{#parameters}}, {{{type}}} {{{name_camel}}}{{/parameters}}) throws + BadServerResponse, + XenAPIException, + IOException{{#errors}}, + {{name}}{{/errors}} { + String methodCall = "{{#is_async}}Async.{{/is_async}}{{{object_name}}}.{{{name}}}"; + {{#supports_session}}String sessionReference = c.getSessionReference();{{/supports_session}}{{#method_parameters}}{{#is_record}} + var {{{name_camel}}}_map = {{{name_camel}}}.toMap();{{/is_record}}{{/method_parameters}} + Object[] methodParameters = { {{#method_parameters}}{{{name_camel}}}{{#is_record}}_map{{/is_record}}{{^is_last}}, {{/is_last}}{{/method_parameters}} };{{#type_reference}} + var typeReference = new TypeReference<{{{type_reference}}}>(){};{{/type_reference}} + {{^returns_void}}return 
{{/returns_void}}c.dispatch(methodCall, methodParameters{{#type_reference}}, typeReference{{/type_reference}}); + } + + {{/methods}} +} \ No newline at end of file From b05efa98636b9b93213bc7c3e1ca8ead5dd5b72f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 11 Oct 2021 15:50:49 +0100 Subject: [PATCH 221/341] CP-38343: xenopsd: GC and memory RRD stats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Based on xapi_stats.ml, but heavily refactored. Signed-off-by: Edwin Török (cherry picked from commit 40d3aa7923c7a5c7654d9e83508e52b3b7c3840a) (cherry picked from commit b30f2310e6087ef59c486410f079175bf36aa705) --- ocaml/xenopsd/xc/dune | 3 + ocaml/xenopsd/xc/mem_stats.ml | 336 +++++++++++++++++++++++++++++ ocaml/xenopsd/xc/mem_stats.mli | 22 ++ ocaml/xenopsd/xc/xenops_xc_main.ml | 2 + 4 files changed, 363 insertions(+) create mode 100644 ocaml/xenopsd/xc/mem_stats.ml create mode 100644 ocaml/xenopsd/xc/mem_stats.mli diff --git a/ocaml/xenopsd/xc/dune b/ocaml/xenopsd/xc/dune index 4a79452dbbe..b841da23fbc 100644 --- a/ocaml/xenopsd/xc/dune +++ b/ocaml/xenopsd/xc/dune @@ -25,6 +25,9 @@ rpclib.core rpclib.json rresult + rrdd-plugin + rrdd-plugin.base + rrdd-plugin.local sexplib0 qmp threads.posix diff --git a/ocaml/xenopsd/xc/mem_stats.ml b/ocaml/xenopsd/xc/mem_stats.ml new file mode 100644 index 00000000000..cb2b2a1eb92 --- /dev/null +++ b/ocaml/xenopsd/xc/mem_stats.ml @@ -0,0 +1,336 @@ +(* + * Copyright (C) Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + *) + +module D = Debug.Make (struct let name = "mem_stats" end) + +module Compat = struct + let mtime_clock_counter = Mtime_clock.counter + + let mtime_clock_count = Mtime_clock.count + + let mtime_span_to_s f = Mtime.Span.to_float_ns f *. 1e-9 + + let file_lines_fold = Xapi_stdext_unix.Unixext.file_lines_fold + + let reporter_async ~shared_page_count ~dss_f = + let open Rrdd_plugin in + Reporter.start_async + (module D : Debug.DEBUG) + ~uid:"mem-stats" ~neg_shift:0.5 ~target:(Reporter.Local shared_page_count) + ~protocol:Rrd_interface.V2 ~dss_f +end + +open Compat + +module SlowReporter = struct + (** [report ~interval_s ~generate_dss] calls [generate_dss] every [interval_s] only, + and substitutes the previous value when called more often. + Using VT_Unknown or NaN would leave gaps in the graph. + Report_local only supports reporting at 5s intervals, but some metrics are too costly to + gather that often, e.g. Gc.stat needs to walk the entire heap. + *) + let report ~interval_s ~generate_dss state = + match state with + | Some (t0, dss) when mtime_clock_count t0 |> mtime_span_to_s < interval_s + -> + (state, (false, dss)) + | None | Some _ -> + let dss = generate_dss () in + let next_state = Some (mtime_clock_counter (), dss) in + (next_state, (true, dss)) + + let iter_of_fold f = + let last = ref None in + fun () -> + let next, r = f !last in + (* to make reasoning about metrics easier, this is the only place + that contains explicit mutation in this file + (other than hashtbl construction on startup) + *) + last := next ; + r +end + +module SimpleMetrics = struct + (* metric definitions *) + + (* Caveats: + * do not use ~transform flag to ds_make: it is lost during rrd-transport with no warning! 
+ * writing a VT_Unknown causes an exception and prevents other values from being written too + * the way to write an unknown value is to write something out of min/max range, which then gets + converted to nan upon reading (or write nan if the type is float, don't do this if type is int + since it causes a metadata change) + * use Sys.word_size, and not hard-coded word-to-kib conversion, the old code still had the + values for a 32-bit word + * the value type (int64 vs float) is cached by the transport, changing it causes a metadata + change + * it is best to always write out the same Rrd list, even if some values are missing, otherwise + the metadata would have to get reparsed (crc changed). But that doesn't actually work that way + because the metadata also includes the values (they are changed to uninitialized_ds only upon + read), thus any change in value triggers a metadata crc change, and a metadata reparse! Still + lets assume this will be fixed and avoid needless metadata changes! + * everything gets converted to a float in the end: int64 type just has some more precision prior + to delta calculation. 
However for our purposes here the 'float' type (which is double precision) + has 53-bits of precision, which is more than enough for KiB - it results in 63-bits of precision + in term of bytes (including sign) + *) + + let ds_int64 i = Rrd.VT_Int64 i + + let ds_float f = Rrd.VT_Float f + + let ds_update ds v = {ds with Ds.ds_value= v} + + let executable = Filename.basename Sys.executable_name + + let ds_name name = + Printf.sprintf "%s_%s" executable (Astring.String.Ascii.lowercase name) + + let ds_description desc = Printf.sprintf desc executable + + let to_float ds = + match ds.Ds.ds_value with + | Rrd.VT_Int64 i -> + if i >= 0L then Int64.to_float i else nan + | Rrd.VT_Float f -> + f + | Rrd.VT_Unknown -> + nan + + let define_unit ~ty ~units ~min ?(default = true) name description = + let name = ds_name name in + let description = ds_description description in + fun value -> + Ds.ds_make ~name ~description ~value ~ty ~default ~units ~min () + + let kib ?default ?(min = 0.0) v = + define_unit ~ty:Rrd.Gauge ~units:"KiB" ~min ?default v + + let kib_per_s = define_unit ~ty:Rrd.Derive ~units:"KiB/s" ~min:0.0 + + let count = define_unit ~ty:Rrd.Gauge ~units:"" ~min:0.0 + + let words_to_kib w = w *. float (Sys.word_size / 8) /. 1024. |> ds_float +end + +module Proc = struct + module KV = Astring.String.Map + + (** [drop_unit s] parses values of the form 'NN kB' or 'NN' and returns 'NN'. *) + let drop_unit s = + match Astring.String.cut ~sep:" " s with None -> s | Some (v, _) -> v + + (** [parse_value s] parses values of the form ' N kB' or ' N'. 
*) + let parse_value s = s |> Astring.String.trim |> drop_unit |> Int64.of_string + + let file_lines_filter_map f ~path = + let fold acc line = + match f line with None -> acc | Some (k, v) -> KV.add k v acc + in + file_lines_fold fold KV.empty path + + open SimpleMetrics + + let parse_keyvalue fields ~path () = + let parse_pairs = function + | None -> + None + | Some (key, value) -> ( + match KV.find key fields with + | None -> + None + | Some ds -> + let v = parse_value value in + Some (key, ds_update ds @@ ds_int64 v) + ) + in + file_lines_filter_map ~path @@ fun line -> + line |> Astring.String.cut ~sep:":" |> parse_pairs + + let define_fields ~path l = + let kv = + ListLabels.fold_left l ~init:KV.empty ~f:(fun acc (key, ds) -> + KV.add key ds acc + ) + in + parse_keyvalue kv ~path:(Filename.concat "/proc/self" path) + + let unknown = ds_int64 (-1L) + + let kib ?default key desc = (key, kib ?default key desc unknown) + + let count ?default key desc = (key, count ?default key desc unknown) + + module Fields = struct + let vmdata = "VmData" + + let vmpte = "VmPTE" + + let threads = "Threads" + + let fdsize = "FDSize" + + let vmsize = "VmSize" + + let vmlck = "VmLck" + + let vmpin = "VmPin" + + let vmstk = "VmStk" + + let rss = "Rss" + + (* there is also /proc/self/stat and /proc/self/statm, but we'd need to open and parse both *) + let status = + define_fields ~path:"status" + [ + count threads "Total number of threads used by %s" + ; count fdsize "Total number of file descriptors used by %s" + ; kib vmsize "Total amount of memory mapped by %s" + ; kib vmlck "Total amount of memory locked by %s" + ; kib vmpin "Total amount of memory pinned by %s" + (* VmRSS is inaccurate accoring to latest proc(5) *) + ; kib vmdata + "Total amount of writable, non-shared and non-stack memory used by \ + %s" + ; kib vmstk "Total amount of main stack memory used by %s" + ; kib vmpte "Total amount of page table entry memory used by %s" + ] + + (* According to latest proc(5) these 
are slower, but provide more accurate information. + The RSS reported by other stat counters could be off depending on the number of threads. *) + let smaps_rollup = + define_fields ~path:"smaps_rollup" + [kib rss "Total amount of resident memory used by %s"] + end + + let find key kv = to_float (KV.get key kv) + + let to_list kv = KV.bindings kv |> List.rev_map snd +end + +module GcStat = struct + open SimpleMetrics + open Gc + + let ocaml_total = + let field = kib "ocaml_total" "Total OCaml memory used by %s" in + fun gc control -> + gc.heap_words + control.minor_heap_size |> float |> words_to_kib |> field + + let maybe_words name description v = + (* quick_stat would return a value of 0, which is not valid *) + v |> float |> words_to_kib |> kib ~min:0.001 name description + + let memory_allocation_precise (gc, control) = + [ + ocaml_total gc control + ; gc.minor_words +. gc.major_words -. gc.promoted_words + |> words_to_kib + |> kib_per_s "ocaml_allocation_rate" + "Amount of allocations done by OCaml in the given period by %s" + ] + + let memory_allocation_approx_expensive (gc, _control) = + [ + (* see https://github.com/ocaml/ocaml/blob/trunk/stdlib/gc.mli#L50-L59, without running a major + cycle the live_words may overestimate the actual live words, "live" just means "not currently + known to be collectible"*) + gc.live_words + |> maybe_words "ocaml_maybe_live" + "OCaml memory not currently known to be collectable by %s" + ; gc.free_words |> maybe_words "ocaml_free" "OCaml memory available to %s" + ] +end + +module Derived = struct + open SimpleMetrics + + let memextra_kib = + kib "mem_extra" "Total amount of non-OCaml and non-stack memory used by %s" + + let ulimit_stack = + let ic = Unix.open_process_in "ulimit -s" in + let r = ic |> input_line |> Int64.of_string in + close_in_noerr ic ; Int64.to_float r + + let memextra_kib stats (gc_stat, control) = + let ocaml_total_kib = GcStat.ocaml_total gc_stat control |> to_float in + let vmdata = Proc.find 
Proc.Fields.vmdata stats in + let vmpte = Proc.find Proc.Fields.vmpte stats in + let threads = Proc.find Proc.Fields.threads stats in + (* Each thread, except the main one will allocate 'ulimit -s' worth of VmData. + This won't immediately show up in VmRss until actually used (e.g. by a deep calltree due to recursion) *) + vmdata -. vmpte -. (ulimit_stack *. (threads -. 1.)) -. ocaml_total_kib + |> ds_float + |> memextra_kib +end + +let observe_stats l = + let names = ListLabels.rev_map l ~f:(fun ds -> ds.Ds.ds_name) in + let values = + ListLabels.rev_map l ~f:(fun ds -> + let f = + match ds.Ds.ds_value with + | Rrd.VT_Int64 i -> + Int64.to_float i + | Rrd.VT_Float f -> + f + | Rrd.VT_Unknown -> + nan + in + ds.Ds.ds_pdp_transform_function f |> Printf.sprintf "%.0f" + ) + in + D.debug "stats header: %s" (String.concat "," names) ; + D.debug "stats values: %s" (String.concat "," values) + +let generate_expensive_stats = + let generate_dss () = + let stat = Gc.stat () in + let gc_control = Gc.get () in + let rss = Proc.Fields.smaps_rollup () |> Proc.to_list in + let gcstat = GcStat.memory_allocation_approx_expensive (stat, gc_control) in + List.rev_append rss gcstat + in + SlowReporter.iter_of_fold (SlowReporter.report ~interval_s:150. 
~generate_dss) + +let generate_stats_exn () = + let status = Proc.Fields.status () in + let gc_stat = Gc.quick_stat () in + let gc_control = Gc.get () in + let derived = Derived.memextra_kib status (gc_stat, gc_control) in + let gcstat = GcStat.memory_allocation_precise (gc_stat, gc_control) in + let is_slow, slow_stats = generate_expensive_stats () in + let stats = + derived :: List.concat [gcstat; Proc.to_list status; slow_stats] + in + if is_slow then + observe_stats stats ; + stats |> List.rev_map (fun x -> (Rrd.Host, x)) + +let generate_stats () = + try generate_stats_exn () + with e -> + D.log_backtrace () ; + D.debug "Failed to generate stats: %s" (Printexc.to_string e) ; + [] + +(* xapi currently exports 5 datasources if a slave or 7 if a master; this + * comfortably fits into a single page. *) +let shared_page_count = 1 + +let start () = reporter_async ~shared_page_count ~dss_f:generate_stats + +let stop reporter = Rrdd_plugin.Reporter.cancel ~reporter diff --git a/ocaml/xenopsd/xc/mem_stats.mli b/ocaml/xenopsd/xc/mem_stats.mli new file mode 100644 index 00000000000..d42fc1397c5 --- /dev/null +++ b/ocaml/xenopsd/xc/mem_stats.mli @@ -0,0 +1,22 @@ +(* + * Copyright (C) Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +val start : unit -> Rrdd_plugin.Reporter.t +(** Start a thread which will act as an RRDD plugin, and report interesting + stats to RRDD. *) + +val stop : Rrdd_plugin.Reporter.t -> unit +(** Stop the stats reporting thread. 
*) + +val generate_stats : unit -> (Rrd.ds_owner * Ds.ds) list diff --git a/ocaml/xenopsd/xc/xenops_xc_main.ml b/ocaml/xenopsd/xc/xenops_xc_main.ml index b7fce8d0b65..b49f8f0f6d3 100644 --- a/ocaml/xenopsd/xc/xenops_xc_main.ml +++ b/ocaml/xenopsd/xc/xenops_xc_main.ml @@ -57,4 +57,6 @@ let _ = ~specific_nonessential_paths:Xc_resources.nonessentials () ; check_domain0_uuid () ; make_var_run_xen () ; + let reporter = Mem_stats.start () in + at_exit (fun () -> Mem_stats.stop reporter) ; Xenopsd.main (module Xenops_server_xen : Xenops_server_plugin.S) From 8473c355ce07405153a582370dae823ab608bfe4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 5 Aug 2024 16:59:07 +0100 Subject: [PATCH 222/341] CP-38343: use sscanf to parse /proc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- ocaml/xenopsd/xc/mem_stats.ml | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/ocaml/xenopsd/xc/mem_stats.ml b/ocaml/xenopsd/xc/mem_stats.ml index cb2b2a1eb92..9e01d14473e 100644 --- a/ocaml/xenopsd/xc/mem_stats.ml +++ b/ocaml/xenopsd/xc/mem_stats.ml @@ -127,12 +127,11 @@ end module Proc = struct module KV = Astring.String.Map - (** [drop_unit s] parses values of the form 'NN kB' or 'NN' and returns 'NN'. *) - let drop_unit s = - match Astring.String.cut ~sep:" " s with None -> s | Some (v, _) -> v + (** [parse_value_count] parses ' N'. *) + let parse_value_count s = Scanf.sscanf s " %Lu" Fun.id - (** [parse_value s] parses values of the form ' N kB' or ' N'. *) - let parse_value s = s |> Astring.String.trim |> drop_unit |> Int64.of_string + (** [parse_value_kib s] parses values of the form ' N kB'. 
*) + let parse_value_kib s = Scanf.sscanf s " %Lu kB" Fun.id let file_lines_filter_map f ~path = let fold acc line = @@ -150,7 +149,7 @@ module Proc = struct match KV.find key fields with | None -> None - | Some ds -> + | Some (parse_value, ds) -> let v = parse_value value in Some (key, ds_update ds @@ ds_int64 v) ) @@ -168,9 +167,11 @@ module Proc = struct let unknown = ds_int64 (-1L) - let kib ?default key desc = (key, kib ?default key desc unknown) + let kib ?default key desc = + (key, (parse_value_kib, kib ?default key desc unknown)) - let count ?default key desc = (key, count ?default key desc unknown) + let count ?default key desc = + (key, (parse_value_count, count ?default key desc unknown)) module Fields = struct let vmdata = "VmData" From 5446171d0164784ca763f62e98e4217a8feeb450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 6 Aug 2024 16:32:44 +0100 Subject: [PATCH 223/341] CA-396743: log non managed devices in PIF.scan MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- ocaml/xapi/xapi_pif.ml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index d6d7a16a692..04e942f4d10 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -667,6 +667,8 @@ let scan ~__context ~host = ([], []) ) in + debug "non-managed devices=%s" (String.concat "," non_managed_devices) ; + debug "disallow-unplug devices=%s" (String.concat "," disallow_unplug_devices) ; Xapi_stdext_threads.Threadext.Mutex.execute scan_m (fun () -> let t = make_tables ~__context ~host in let devices_not_yet_represented_by_pifs = @@ -681,6 +683,8 @@ let scan ~__context ~host = let mTU = Int64.of_int (Net.Interface.get_mtu dbg device) in let managed = not (List.mem device non_managed_devices) in let disallow_unplug = List.mem device disallow_unplug_devices in + debug "About to introduce %s, managed=%b, disallow-unplug=%b" device + managed 
disallow_unplug ; let (_ : API.ref_PIF) = introduce_internal ~t ~__context ~host ~mAC ~mTU ~vLAN:(-1L) ~vLAN_master_of:Ref.null ~device ~managed ~disallow_unplug () From 5acf001e6bccc41090dd48d318a6d5804dcc8f96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 6 Aug 2024 16:33:12 +0100 Subject: [PATCH 224/341] CA-396743: make Network.managed reflect PIF.managed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will make it easier to filter out these networks in test code. A PIF is unmanaged when it is a boot from SAN interface for example, as returned by the 'bfs-interfaces' script. Certain operations are not valid on such interfaces, e.g. you cannot use them to export NBD devices. Fixes: 1a9cc7660a ("CP-20482: Create network with the specified bridge name") Signed-off-by: Edwin Török --- ocaml/xapi/xapi_pif.ml | 7 ++++--- ocaml/xapi/xapi_pif.mli | 6 ------ 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index 04e942f4d10..baaef68714d 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -347,7 +347,8 @@ let assert_fcoe_not_in_use ~__context ~self = () ) -let find_or_create_network (bridge : string) (device : string) ~__context = +let find_or_create_network (bridge : string) (device : string) ~managed + ~__context = let nets = Db.Network.get_refs_where ~__context ~expr:(Eq (Field "bridge", Literal bridge)) @@ -362,7 +363,7 @@ let find_or_create_network (bridge : string) (device : string) ~__context = Db.Network.create ~__context ~ref:net_ref ~uuid:net_uuid ~current_operations:[] ~allowed_operations:[] ~name_label:(Helpers.choose_network_name_for_pif device) - ~name_description:"" ~mTU:1500L ~purpose:[] ~bridge ~managed:true + ~name_description:"" ~mTU:1500L ~purpose:[] ~bridge ~managed ~other_config:[] ~blobs:[] ~tags:[] ~default_locking_mode:`unlocked ~assigned_ips:[] in @@ -463,7 +464,7 @@ let introduce_internal 
?network ?(physical = true) ~t:_ ~__context ~host ~mAC let net_ref = match network with | None -> - find_or_create_network bridge device ~__context + find_or_create_network bridge device ~managed ~__context | Some x -> x in diff --git a/ocaml/xapi/xapi_pif.mli b/ocaml/xapi/xapi_pif.mli index 93bacd86be5..07c3a85877c 100644 --- a/ocaml/xapi/xapi_pif.mli +++ b/ocaml/xapi/xapi_pif.mli @@ -175,12 +175,6 @@ val assert_usable_for_management : -> unit (** Ensure the PIF can be used for management. *) -val find_or_create_network : - string -> string -> __context:Context.t -> [`network] Ref.t -(** If a network for the given bridge already exists, then return a reference to this network, - * otherwise create a new network and return its reference. -*) - (** Convenient lookup tables for scanning etc *) type tables From 2c7bc39f821c38a5b07ef7c3cfcd0697878357a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 6 Aug 2024 16:39:18 +0100 Subject: [PATCH 225/341] CA-396743: forbid setting NBD purpose on unmanaged networks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We wouldn't be able to add the correct firewall rules, and you're not meant to use the boot from SAN network for NBD. 
Signed-off-by: Edwin Török --- ocaml/xapi/xapi_network.ml | 1 + 1 file changed, 1 insertion(+) diff --git a/ocaml/xapi/xapi_network.ml b/ocaml/xapi/xapi_network.ml index 3aefbad3be8..37d527a2a34 100644 --- a/ocaml/xapi/xapi_network.ml +++ b/ocaml/xapi/xapi_network.ml @@ -439,6 +439,7 @@ let assert_can_add_purpose ~__context ~network:_ ~current:_ newval = assert_no_net_has_bad_porpoise [`nbd] let add_purpose ~__context ~self ~value = + assert_network_is_managed ~__context ~self ; let current = Db.Network.get_purpose ~__context ~self in if not (List.mem value current) then ( assert_can_add_purpose ~__context ~network:self ~current value ; From 1adffbd62673f2df3be45d026f963ed8f058dc97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 6 Aug 2024 16:35:20 +0100 Subject: [PATCH 226/341] CA-396743: fix bridge name for unmanaged devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no bridge for unmanaged devices, return the empty string instead of a non-existent device. Previously we would've returned 'bribft0' for the 'ibft0' interface. 
Signed-off-by: Edwin Török --- ocaml/xapi/xapi_pif.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index baaef68714d..b6067a509de 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -458,7 +458,7 @@ let db_forget ~__context ~self = Db.PIF.destroy ~__context ~self let introduce_internal ?network ?(physical = true) ~t:_ ~__context ~host ~mAC ~mTU ~device ~vLAN ~vLAN_master_of ?metrics ~managed ?(disallow_unplug = false) () = - let bridge = bridge_naming_convention device in + let bridge = if managed then bridge_naming_convention device else "" in (* If we are not told which network to use, * apply the default convention *) let net_ref = From d3b3c7469255121488bd06b465190c9f1f9bfb14 Mon Sep 17 00:00:00 2001 From: Lin Liu Date: Fri, 9 Aug 2024 03:13:42 +0000 Subject: [PATCH 227/341] CP-49148: Clean py2 compatible code Signed-off-by: Lin Liu --- ocaml/vhd-tool/test/dummy_extent_reader.py | 3 +-- ocaml/xapi-storage/python/xapi/__init__.py | 23 +++------------------- pyproject.toml | 2 +- python3/libexec/mail-alarm | 5 ----- scripts/Makefile | 5 ----- 5 files changed, 5 insertions(+), 33 deletions(-) diff --git a/ocaml/vhd-tool/test/dummy_extent_reader.py b/ocaml/vhd-tool/test/dummy_extent_reader.py index 1c344af40ef..b692674dded 100755 --- a/ocaml/vhd-tool/test/dummy_extent_reader.py +++ b/ocaml/vhd-tool/test/dummy_extent_reader.py @@ -1,10 +1,9 @@ -#!/usr/bin/python +#!/usr/bin/python3 """ Dummy extent reader that returns a huge extent list """ -from __future__ import print_function import json import sys diff --git a/ocaml/xapi-storage/python/xapi/__init__.py b/ocaml/xapi-storage/python/xapi/__init__.py index 50eae33fe1a..0f7c2a13de3 100644 --- a/ocaml/xapi-storage/python/xapi/__init__.py +++ b/ocaml/xapi-storage/python/xapi/__init__.py @@ -25,30 +25,12 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" -from __future__ import print_function import sys import traceback import json import argparse -# is_str(): Shortcut to check if a value is an instance of a string type. -# -# Replace: -# if not isinstance(code, str) and not isinstance(code, unicode): -# with: -# if not is_str(code): -# -# This makes for much cleaner code and suits Python3 well too. -if sys.version_info[0] > 2: - long = int - def is_str(x): - return isinstance(x, str) # With Python3, all strings are unicode -else: - def is_str(x): # pragma: no cover - return isinstance(x, (str, unicode)) # pylint: disable=undefined-variable - - def success(result): return {"Status": "Success", "Value": result} @@ -84,7 +66,7 @@ class XenAPIException(Exception): def __init__(self, code, params): Exception.__init__(self) - if not is_str(code): + if not isinstance(code, str): raise TypeError("string", repr(code)) if not isinstance(params, list): raise TypeError("list", repr(params)) @@ -151,7 +133,8 @@ def __init__(self, name): def is_long(x): try: - long(x) + # Python3 int is long, keep the name for interface compatibility + int(x) return True except ValueError: return False diff --git a/pyproject.toml b/pyproject.toml index 1881ebbd350..55467081438 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -299,7 +299,7 @@ inputs = [ "ocaml/vhd-tool", ] disable = [ - # Reduce noise from python2 scripts(import yum, xenfsimage, xcp, urlgrabber) + # Reduce noise from python scripts(import yum, xenfsimage, xcp, urlgrabber) "import-error", ] platform = "linux" diff --git a/python3/libexec/mail-alarm b/python3/libexec/mail-alarm index 0b41dd5e0e9..aab40edc46a 100755 --- a/python3/libexec/mail-alarm +++ b/python3/libexec/mail-alarm @@ -121,11 +121,6 @@ def load_mail_language(mail_language): mail_language_pack_path, mail_language + ".json" ) - # this conditional branch won't be executed, it's solely for the purpose of ensuring pass in python2 ut. 
- if sys.version_info.major == 2: - with open(mail_language_file, "r") as fileh: - return json.load(fileh, encoding="utf-8") - with open(mail_language_file, encoding="utf-8") as fileh: return json.load(fileh) diff --git a/scripts/Makefile b/scripts/Makefile index 15ad2c62d51..4c04da3943c 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -1,8 +1,5 @@ include ../config.mk -SITE_DIR=$(shell python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -SITE3_DIR=$(shell python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") - IPROG=./install.sh 755 IDATA=./install.sh 644 @@ -22,8 +19,6 @@ install: mkdir -p $(DESTDIR)/usr/lib/systemd/system mkdir -p $(DESTDIR)/usr/lib/yum-plugins mkdir -p $(DESTDIR)$(OPTDIR)/packages/post-install-scripts - mkdir -p $(DESTDIR)$(SITE_DIR) - mkdir -p $(DESTDIR)$(SITE3_DIR) mkdir -p $(DESTDIR)/etc/systemd/system/stunnel@xapi.service.d/ $(IPROG) base-path $(DESTDIR)/etc/xapi.d $(IPROG) sm_diagnostics $(DESTDIR)$(LIBEXECDIR) From 7d6967a8ba1c48056f63f63d89b1ffb2acb33fba Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Wed, 10 Jul 2024 10:42:16 +0100 Subject: [PATCH 228/341] Extend Java deserialization support for xen-api dates non-Zulu dates were not parsed correctly Signed-off-by: Danilo Del Busso --- .../xenapi/CustomDateDeserializer.java | 116 ++++++++++++++++-- 1 file changed, 104 insertions(+), 12 deletions(-) diff --git a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java index a0e9bff1a3d..e397ba7e27f 100644 --- a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java +++ b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java @@ -37,21 +37,97 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; 
+import java.util.TimeZone; /** - * {@link CustomDateDeserializer} is a Jackson JSON deserializer for parsing {@link Date} objects + * {@link CustomDateDeserializer} is a Jackson JSON deserializer for parsing + * {@link Date} objects * from custom date formats used in Xen-API responses. */ public class CustomDateDeserializer extends StdDeserializer { /** - * Array of {@link SimpleDateFormat} objects representing the custom date formats - * used in XenServer API responses. + * Array of {@link SimpleDateFormat} objects representing the date formats + * used in xen-api responses. + * + * RFC-3339 date formats can be returned in either Zulu or time zone agnostic. + * This list is not an exhaustive list of formats supported by RFC-3339, rather + * a set of formats that will enable the deserialization of xen-api dates. + * Formats are listed in order of decreasing precision. When adding + * to this list, please ensure the order is kept. */ - private final SimpleDateFormat[] dateFormatters - = new SimpleDateFormat[]{ + private static final SimpleDateFormat[] dateFormatsUtc = { + // Most commonly returned formats + new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"), + new SimpleDateFormat("ss.SSS"), + + // Other + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"), new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss'Z'"), - new SimpleDateFormat("ss.SSS") + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSS'Z'"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSS'Z'"), + + }; + + /** + * Array of {@link SimpleDateFormat} objects representing the date formats for + * local time. + * These formats are used to parse dates in local time zones. + * Formats are listed in order of decreasing precision. When adding + * to this list, please ensure the order is kept. 
+ */ + private static final SimpleDateFormat[] dateFormatsLocal = { + // no dashes, no colons + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSXXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSSX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss.SSS"), + + new SimpleDateFormat("yyyyMMdd'T'HHmmssZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssZZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssZ"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssXXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssXX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmssX"), + new SimpleDateFormat("yyyyMMdd'T'HHmmss"), + + // no dashes, with colons + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSXXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSSX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss.SSS"), + + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssZZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssZZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssZ"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssXXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssXX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ssX"), + new SimpleDateFormat("yyyyMMdd'T'HH:mm:ss"), + + // dashes and colons + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS"), + + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZZZ"), + new 
SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssX"), + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"), }; /** @@ -62,28 +138,44 @@ public CustomDateDeserializer() { } /** - * Constructs a {@link CustomDateDeserializer} instance with the specified value type. + * Constructs a {@link CustomDateDeserializer} instance with the specified value + * type. * * @param t The value type to handle (can be null, handled by superclass) */ public CustomDateDeserializer(Class t) { super(t); + var utcTimeZone = TimeZone.getTimeZone("UTC"); + for (var utcFormatter : dateFormatsUtc) { + utcFormatter.setTimeZone(utcTimeZone); + } } + private static + /** * Deserializes a {@link Date} object from the given JSON parser. * - * @param jsonParser The JSON parser containing the date value to deserialize + * @param jsonParser The JSON parser containing the date value to + * deserialize * @param deserializationContext The deserialization context * @return The deserialized {@link Date} object * @throws IOException if an I/O error occurs during deserialization */ - @Override - public Date deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException { + @Override public Date deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) + throws IOException { + var text = jsonParser.getText(); + for (SimpleDateFormat formatter : dateFormatsUtc) { + try { + return formatter.parse(text); + } catch (ParseException e) { + // ignore + } + } - for (SimpleDateFormat formatter : dateFormatters) { + for (SimpleDateFormat formatter : dateFormatsLocal) { try { - return formatter.parse(jsonParser.getText()); + return formatter.parse(text); } catch (ParseException e) { // ignore } From c44a02626e79dd90e33cf4ef55a2f66812aa86a8 Mon Sep 17 00:00:00 2001 From: 
Bernhard Kaindl Date: Wed, 14 Aug 2024 15:07:16 +0200 Subject: [PATCH 229/341] Add missing typing stubs for xcp.cmd and xcp.compat Signed-off-by: Bernhard Kaindl --- python3/stubs/xcp/cmd.pyi | 13 +++++++++++++ python3/stubs/xcp/compat.pyi | 9 +++++++++ 2 files changed, 22 insertions(+) create mode 100644 python3/stubs/xcp/cmd.pyi create mode 100644 python3/stubs/xcp/compat.pyi diff --git a/python3/stubs/xcp/cmd.pyi b/python3/stubs/xcp/cmd.pyi new file mode 100644 index 00000000000..950a6d28200 --- /dev/null +++ b/python3/stubs/xcp/cmd.pyi @@ -0,0 +1,13 @@ +from basedtyping import Untyped +from typing import Any +from xcp import logger as logger +from xcp.compat import open_defaults_for_utf8_text as open_defaults_for_utf8_text + +def runCmd(command: bytes | str | list[str], with_stdout: bool = False, with_stderr: bool = False, inputtext: bytes | str | None = None, **kwargs: Any) -> Any: ... + +class OutputCache: + cache: Untyped + def __init__(self): ... + def fileContents(self, fn, *args, **kwargs) -> Untyped: ... + def runCmd(self, command, with_stdout: bool = False, with_stderr: bool = False, inputtext: Untyped | None = None, **kwargs) -> Untyped: ... + def clearCache(self): ... diff --git a/python3/stubs/xcp/compat.pyi b/python3/stubs/xcp/compat.pyi new file mode 100644 index 00000000000..bd2c7cfa4a6 --- /dev/null +++ b/python3/stubs/xcp/compat.pyi @@ -0,0 +1,9 @@ +from basedtyping import Untyped +from typing import Any, IO + +def open_textfile(filename: str, mode: str, encoding: str = 'utf-8', **kwargs: Any) -> IO[str]: ... + +open_utf8: Untyped + +def open_with_codec_handling(filename: str, mode: str = 'r', encoding: str = 'utf-8', **kwargs: Any) -> IO[Any]: ... +def open_defaults_for_utf8_text(args: tuple[Any, ...] | None, kwargs: Any) -> tuple[str, Any]: ... 
From 5e40c09a3fef7ffb3b7239b0db5ab4e2fdde2457 Mon Sep 17 00:00:00 2001 From: Bernhard Kaindl Date: Wed, 14 Aug 2024 15:30:10 +0200 Subject: [PATCH 230/341] rrdd: Test the changed rrdd.API.update() method Signed-off-by: Bernhard Kaindl --- .../rrdd/test_api_wait_until_next_reading.py | 115 ++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py b/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py index 5ca9b897fad..a038513e230 100644 --- a/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py +++ b/ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py @@ -1,7 +1,11 @@ # Test: pytest -v -s ocaml/xcp-rrdd/scripts/rrdd/test_api_wait_until_next_reading.py """Parametrized test exercising all conditions in rrdd.API.wait_until_next_reading()""" +import json import socket +from io import BytesIO +from struct import pack, unpack from warnings import catch_warnings as import_without_warnings, simplefilter +from zlib import crc32 # Dependencies: # pip install pytest-mock @@ -77,3 +81,114 @@ def test_api_getter_functions(api): api.path = "path" assert api.get_header() == "header" assert api.get_path() == "path" + + +class MockDataSource: + """Mock class for testing the rrdd.API.update() method""" + def __init__(self, name, metadata, packed_data): + self.name = name + self.metadata = metadata + self.packed_data = packed_data + + def pack_data(self): + """Simple substitute for the pack_data() method of the rrdd.DataSource class""" + return self.packed_data + + +@pytest.mark.parametrize( + "data_sources, expected_metadata", + [ + pytest.param( + [ + MockDataSource("ds1", {"key1": "value1"}, b"\x00\x01"), + MockDataSource("ds2", {"key2": "value2"}, b"\x00\x02"), + ], + {"key1": "value1", "key2": "value2"}, + ), + pytest.param( + [MockDataSource("ds1", {"key1": "value1"}, b"\x00\x01")], + {"key1": "value1"}, + ), + pytest.param( + [], + {}, + ), + ], +) +def test_update( + 
mocker, + data_sources, + expected_metadata, +): + """Test the update() method of the rrdd.API class""" + # Arrange + def checksum(*args): + """Calculate the CRC32 checksum of the given arguments""" + return crc32(*args) & 0xFFFFFFFF + + class MockAPI(rrdd.API): + """Mock API class to test the update() method""" + def __init__(self): # pylint: disable=super-init-not-called + self.dest = BytesIO() + self.datasources = data_sources + + def pack_data(self, ds: MockDataSource): + return ds.pack_data() + + testee = MockAPI() + testee.deregister = mocker.Mock() + fixed_time = 1234567890 + mocker.patch("time.time", return_value=fixed_time) + + # Act + testee.update() + + # Assert + + # Read and unpack the header + testee.dest.seek(0) + # The header is 20 bytes long and has the following format: + # 0-11: "DATASOURCES" (12 bytes) + # 12-15: data_checksum (4 bytes) + # 16-19: metadata_checksum (4 bytes) + # 20-23: num_datasources (4 bytes) + # 24-31: timestamp (8 bytes) + header_len = len("DATASOURCES") + 4 + 4 + 4 + 8 + header = testee.dest.read(header_len) + ( + unpacked_data_checksum, + unpacked_metadata_checksum, + unpacked_num_datasources, + unpacked_timestamp, + ) = unpack(">LLLQ", header[11:]) + + # Assert the expected unpacked header value + assert header.startswith(b"DATASOURCES") + assert unpacked_num_datasources == len(data_sources) + assert unpacked_timestamp == fixed_time + + # + # Assert datasources and the expected data checksum + # + + # Initialize the expected checksum with the fixed time + expected_checksum = checksum(pack(">Q", fixed_time)) + # Loop over the datasources and assert the packed data + testee.dest.seek(header_len) + # sourcery skip: no-loop-in-tests + for ds in data_sources: + packed_data = testee.dest.read(len(ds.pack_data())) + assert packed_data == ds.pack_data() + # Update the checksum with the packed data + expected_checksum = checksum(packed_data, expected_checksum) + + assert unpacked_data_checksum == expected_checksum + + # + # Assert 
metadata and the expected metadata checksum + # + metadata_length = unpack(">L", testee.dest.read(4))[0] + metadata_json = testee.dest.read(metadata_length) + + assert json.loads(metadata_json) == {"datasources": expected_metadata} + assert unpacked_metadata_checksum == checksum(metadata_json) From bdf5268523f996d3fee5aa2930b208adc653049b Mon Sep 17 00:00:00 2001 From: Lin Liu Date: Tue, 20 Aug 2024 02:29:26 +0000 Subject: [PATCH 231/341] CP-49148: More clean python2 code - Update following embeded shellbang to python3 * generate-iscsi-iqn * xe-backup-metadata * xe-restore-metadata - Remove interop-test.sh as not used Signed-off-by: Lin Liu --- ocaml/message-switch/core_test/interop-test.sh | 9 --------- scripts/generate-iscsi-iqn | 2 +- scripts/xe-backup-metadata | 2 +- scripts/xe-restore-metadata | 2 +- 4 files changed, 3 insertions(+), 12 deletions(-) delete mode 100755 ocaml/message-switch/core_test/interop-test.sh diff --git a/ocaml/message-switch/core_test/interop-test.sh b/ocaml/message-switch/core_test/interop-test.sh deleted file mode 100755 index 912d47f2349..00000000000 --- a/ocaml/message-switch/core_test/interop-test.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -set -ex - -LINKPATH="${TMPDIR:-/tmp}/link_test" - -rm -rf ${LINKPATH} && mkdir -p ${LINKPATH} - -lwt/link_test_main.exe -PYTHONPATH=core python message_switch_test.py diff --git a/scripts/generate-iscsi-iqn b/scripts/generate-iscsi-iqn index 882a4c7f6fd..9550435716d 100755 --- a/scripts/generate-iscsi-iqn +++ b/scripts/generate-iscsi-iqn @@ -36,7 +36,7 @@ geniqn() { domain=${defaultdomain} fi - revdomain=$(python -c "${REVERSE_PY}" $domain) + revdomain=$(python3 -c "${REVERSE_PY}" $domain) uuid=$(uuidgen | cut -d- -f1) date=$(date +"%Y-%m") diff --git a/scripts/xe-backup-metadata b/scripts/xe-backup-metadata index 43c4617ec3b..19f0cf0e4a9 100755 --- a/scripts/xe-backup-metadata +++ b/scripts/xe-backup-metadata @@ -51,7 +51,7 @@ function usage { function uuid5 { # could use a modern uuidgen but 
it's not on XS 8 # should work with Python 2 and 3 - python -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" + python3 -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" } function test_sr { diff --git a/scripts/xe-restore-metadata b/scripts/xe-restore-metadata index 5968dc102e8..ca7029d7c07 100755 --- a/scripts/xe-restore-metadata +++ b/scripts/xe-restore-metadata @@ -65,7 +65,7 @@ function test_sr { NS="e93e0639-2bdb-4a59-8b46-352b3f408c19" function uuid5 { # could use a modern uuidgen but it's not on XS 8 - python -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" + python3 -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" } dry_run=0 From 9601bb19ff35e80dd169914ef0cb9bd0605c04c2 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 21 Aug 2024 09:47:14 +0100 Subject: [PATCH 232/341] Update record_util tests to the current state Automatically regenerates the all_enum file, copies the current record_util, and regenerates the test cases with the script specified in the file itself. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/old_enum_all.ml | 33 ++- ocaml/tests/record_util/old_record_util.ml | 303 +++++++++----------- ocaml/tests/record_util/test_record_util.ml | 14 +- 3 files changed, 174 insertions(+), 176 deletions(-) diff --git a/ocaml/tests/record_util/old_enum_all.ml b/ocaml/tests/record_util/old_enum_all.ml index 8c5b422365c..f58cdc7542f 100644 --- a/ocaml/tests/record_util/old_enum_all.ml +++ b/ocaml/tests/record_util/old_enum_all.ml @@ -1,3 +1,7 @@ +let all_placement_policy = [`anti_affinity; `normal] + +let all_origin = [`remote; `bundle] + let all_certificate_type = [`ca; `host; `host_internal] let all_cluster_host_operation = [`enable; `disable; `destroy] @@ -22,7 +26,7 @@ let all_vgpu_type_implementation = let all_allocation_algorithm = [`breadth_first; `depth_first] -let all_pgpu_dom0_access = +let all_pci_dom0_access = [`enabled; `disable_on_reboot; `disabled; `enable_on_reboot] let all_sriov_configuration_mode = [`sysfs; `modprobe; `manual; `unknown] @@ -135,16 +139,19 @@ let all_host_numa_affinity_policy = [`any; `best_effort; `default_policy] let all_host_sched_gran = [`core; `cpu; `socket] -let all_latest_synced_updates_applied_state = [`yes; `no; `unknown] - let all_update_guidances = [ `reboot_host ; `reboot_host_on_livepatch_failure + ; `reboot_host_on_kernel_livepatch_failure + ; `reboot_host_on_xen_livepatch_failure ; `restart_toolstack ; `restart_device_model + ; `restart_vm ] +let all_latest_synced_updates_applied_state = [`yes; `no; `unknown] + let all_host_display = [`enabled; `disable_on_reboot; `disabled; `enable_on_reboot] @@ -159,6 +166,7 @@ let all_host_allowed_operations = ; `vm_resume ; `vm_migrate ; `apply_updates + ; `enable ] let all_vm_appliance_operation = @@ -180,6 +188,8 @@ let all_tristate_type = [`yes; `no; `unspecified] let all_domain_type = [`hvm; `pv; `pv_in_pvh; `pvh; `unspecified] +let all_vm_uefi_mode = [`setup; `user] + let all_on_crash_behaviour = [ `destroy @@ -267,6 +277,7 
@@ let all_pool_allowed_operations = ; `designate_new_master ; `configure_repositories ; `sync_updates + ; `sync_bundle ; `get_updates ; `apply_updates ; `tls_verification_enable @@ -274,6 +285,7 @@ let all_pool_allowed_operations = ; `exchange_certificates_on_join ; `exchange_ca_certificates_on_join ; `copy_primary_host_certs + ; `eject ] let all_task_status_type = @@ -283,9 +295,22 @@ let all_task_allowed_operations = [`cancel; `destroy] let all_hello_return = [`ok; `unknown_host; `cannot_talk_back] +let all_pool_guest_secureboot_readiness = [`ready; `ready_no_dbx; `not_ready] + let all_livepatch_status = [`ok_livepatch_complete; `ok_livepatch_incomplete; `ok] -let all_sr_health = [`healthy; `recovering] +let all_vm_secureboot_readiness = + [ + `not_supported + ; `disabled + ; `first_boot + ; `ready + ; `ready_no_dbx + ; `setup_mode + ; `certs_incomplete + ] + +let all_sr_health = [`healthy; `recovering; `unreachable; `unavailable] let all_event_operation = [`add; `del; `_mod] diff --git a/ocaml/tests/record_util/old_record_util.ml b/ocaml/tests/record_util/old_record_util.ml index ad38fe7ea37..2c98955fffd 100644 --- a/ocaml/tests/record_util/old_record_util.ml +++ b/ocaml/tests/record_util/old_record_util.ml @@ -15,6 +15,9 @@ exception Record_failure of string +let record_failure fmt = + Printf.ksprintf (fun msg -> raise (Record_failure msg)) fmt + let to_str = function Rpc.String x -> x | _ -> failwith "Invalid" let certificate_type_to_string = function @@ -151,6 +154,38 @@ let string_to_vm_operation x = else List.assoc x table +let vm_uefi_mode_of_string = function + | "setup" -> + `setup + | "user" -> + `user + | s -> + record_failure "Expected 'user','setup', got %s" s + +let vm_secureboot_readiness_to_string = function + | `not_supported -> + "not_supported" + | `disabled -> + "disabled" + | `first_boot -> + "first_boot" + | `ready -> + "ready" + | `ready_no_dbx -> + "ready_no_dbx" + | `setup_mode -> + "setup_mode" + | `certs_incomplete -> + 
"certs_incomplete" + +let pool_guest_secureboot_readiness_to_string = function + | `ready -> + "ready" + | `ready_no_dbx -> + "ready_no_dbx" + | `not_ready -> + "not_ready" + let pool_operation_to_string = function | `ha_enable -> "ha_enable" @@ -166,6 +201,8 @@ let pool_operation_to_string = function "configure_repositories" | `sync_updates -> "sync_updates" + | `sync_bundle -> + "sync_bundle" | `get_updates -> "get_updates" | `apply_updates -> @@ -178,6 +215,8 @@ let pool_operation_to_string = function "exchange_ca_certificates_on_join" | `copy_primary_host_certs -> "copy_primary_host_certs" + | `eject -> + "eject" let host_operation_to_string = function | `provision -> @@ -198,16 +237,24 @@ let host_operation_to_string = function "VM.migrate" | `apply_updates -> "apply_updates" + | `enable -> + "enable" let update_guidance_to_string = function | `reboot_host -> "reboot_host" | `reboot_host_on_livepatch_failure -> "reboot_host_on_livepatch_failure" + | `reboot_host_on_kernel_livepatch_failure -> + "reboot_host_on_kernel_livepatch_failure" + | `reboot_host_on_xen_livepatch_failure -> + "reboot_host_on_xen_livepatch_failure" | `restart_toolstack -> "restart_toolstack" | `restart_device_model -> "restart_device_model" + | `restart_vm -> + "restart_vm" let latest_synced_updates_applied_state_to_string = function | `yes -> @@ -343,12 +390,8 @@ let string_to_vif_locking_mode = function | "disabled" -> `disabled | s -> - raise - (Record_failure - ("Expected 'network_default', 'locked', 'unlocked', 'disabled', got " - ^ s - ) - ) + record_failure + "Expected 'network_default', 'locked', 'unlocked', 'disabled', got %s" s let vmss_type_to_string = function | `snapshot -> @@ -366,12 +409,8 @@ let string_to_vmss_type = function | "snapshot_with_quiesce" -> `snapshot_with_quiesce | s -> - raise - (Record_failure - ("Expected 'snapshot', 'checkpoint', 'snapshot_with_quiesce', got " - ^ s - ) - ) + record_failure + "Expected 'snapshot', 'checkpoint', 'snapshot_with_quiesce', 
got %s" s let vmss_frequency_to_string = function | `hourly -> @@ -389,7 +428,7 @@ let string_to_vmss_frequency = function | "weekly" -> `weekly | s -> - raise (Record_failure ("Expected 'hourly', 'daily', 'weekly', got " ^ s)) + record_failure "Expected 'hourly', 'daily', 'weekly', got %s" s let network_default_locking_mode_to_string = function | `unlocked -> @@ -403,7 +442,7 @@ let string_to_network_default_locking_mode = function | "disabled" -> `disabled | s -> - raise (Record_failure ("Expected 'unlocked' or 'disabled', got " ^ s)) + record_failure "Expected 'unlocked' or 'disabled', got %s" s let network_purpose_to_string : API.network_purpose -> string = function | `nbd -> @@ -417,7 +456,7 @@ let string_to_network_purpose : string -> API.network_purpose = function | "insecure_nbd" -> `insecure_nbd | s -> - raise (Record_failure ("Expected a network purpose string; got " ^ s)) + record_failure "Expected a network purpose string; got %s" s let vm_appliance_operation_to_string = function | `start -> @@ -605,7 +644,7 @@ let string_to_on_normal_exit s = | "restart" -> `restart | _ -> - raise (Record_failure ("Expected 'destroy' or 'restart', got " ^ s)) + record_failure "Expected 'destroy' or 'restart', got %s" s let on_crash_behaviour_to_string x = match x with @@ -637,14 +676,11 @@ let string_to_on_crash_behaviour s = | "rename_restart" -> `rename_restart | _ -> - raise - (Record_failure - ("Expected 'destroy', 'coredump_and_destroy'," - ^ "'restart', 'coredump_and_restart', 'preserve' or \ - 'rename_restart', got " - ^ s - ) - ) + record_failure + "Expected 'destroy', 'coredump_and_destroy', \ + 'restart','coredump_and_restart', 'preserve' or 'rename_restart', got \ + %s" + s let on_softreboot_behaviour_to_string x = match x with @@ -668,14 +704,11 @@ let string_to_on_softreboot_behaviour s = | "soft_reboot" -> `soft_reboot | _ -> - raise - (Record_failure - ("Expected 'destroy', 'coredump_and_destroy'," - ^ "'restart', 'coredump_and_restart', 'preserve', 
'soft_reboot' or \ - 'rename_restart', got " - ^ s - ) - ) + record_failure + "Expected 'destroy', 'coredump_and_destroy', 'restart', \ + 'coredump_and_restart', 'preserve', 'soft_reboot' or \ + 'rename_restart', got %s" + s let host_display_to_string h = match h with @@ -697,7 +730,7 @@ let host_sched_gran_of_string s = | "socket" -> `socket | _ -> - raise (Record_failure ("Expected 'core','cpu', 'socket', got " ^ s)) + record_failure "Expected 'core','cpu', 'socket', got %s" s let host_sched_gran_to_string = function | `core -> @@ -724,10 +757,8 @@ let host_numa_affinity_policy_of_string a = | "default_policy" -> `default_policy | s -> - raise - (Record_failure - ("Expected 'any', 'best_effort' or 'default_policy', got " ^ s) - ) + record_failure "Expected 'any', 'best_effort' or 'default_policy', got %s" + s let pci_dom0_access_to_string x = host_display_to_string x @@ -738,7 +769,7 @@ let string_to_vdi_onboot s = | "reset" -> `reset | _ -> - raise (Record_failure ("Expected 'persist' or 'reset', got " ^ s)) + record_failure "Expected 'persist' or 'reset', got %s" s let string_to_vbd_mode s = match String.lowercase_ascii s with @@ -747,7 +778,7 @@ let string_to_vbd_mode s = | "rw" -> `RW | _ -> - raise (Record_failure ("Expected 'RO' or 'RW', got " ^ s)) + record_failure "Expected 'RO' or 'RW', got %s" s let vbd_mode_to_string = function `RO -> "ro" | `RW -> "rw" @@ -760,7 +791,7 @@ let string_to_vbd_type s = | "floppy" -> `Floppy | _ -> - raise (Record_failure ("Expected 'CD' or 'Disk', got " ^ s)) + record_failure "Expected 'CD' or 'Disk', got %s" s let power_to_string h = match h with @@ -819,7 +850,7 @@ let ip_configuration_mode_of_string m = | "static" -> `Static | s -> - raise (Record_failure ("Expected 'dhcp','none' or 'static', got " ^ s)) + record_failure "Expected 'dhcp','none' or 'static', got %s" s let vif_ipv4_configuration_mode_to_string = function | `None -> @@ -834,7 +865,7 @@ let vif_ipv4_configuration_mode_of_string m = | "static" -> `Static | 
s -> - raise (Record_failure ("Expected 'none' or 'static', got " ^ s)) + record_failure "Expected 'none' or 'static', got %s" s let ipv6_configuration_mode_to_string = function | `None -> @@ -857,10 +888,7 @@ let ipv6_configuration_mode_of_string m = | "autoconf" -> `Autoconf | s -> - raise - (Record_failure - ("Expected 'dhcp','none' 'autoconf' or 'static', got " ^ s) - ) + record_failure "Expected 'dhcp','none' 'autoconf' or 'static', got %s" s let vif_ipv6_configuration_mode_to_string = function | `None -> @@ -875,7 +903,7 @@ let vif_ipv6_configuration_mode_of_string m = | "static" -> `Static | s -> - raise (Record_failure ("Expected 'none' or 'static', got " ^ s)) + record_failure "Expected 'none' or 'static', got %s" s let primary_address_type_to_string = function | `IPv4 -> @@ -890,7 +918,7 @@ let primary_address_type_of_string m = | "ipv6" -> `IPv6 | s -> - raise (Record_failure ("Expected 'ipv4' or 'ipv6', got " ^ s)) + record_failure "Expected 'ipv4' or 'ipv6', got %s" s let bond_mode_to_string = function | `balanceslb -> @@ -909,7 +937,7 @@ let bond_mode_of_string m = | "lacp" -> `lacp | s -> - raise (Record_failure ("Invalid bond mode. Got " ^ s)) + record_failure "Invalid bond mode. Got %s" s let allocation_algorithm_to_string = function | `depth_first -> @@ -924,7 +952,7 @@ let allocation_algorithm_of_string a = | "breadth-first" -> `breadth_first | s -> - raise (Record_failure ("Invalid allocation algorithm. Got " ^ s)) + record_failure "Invalid allocation algorithm. 
Got %s" s let pvs_proxy_status_to_string = function | `stopped -> @@ -945,12 +973,13 @@ let cluster_host_operation_to_string op = let bool_of_string s = match String.lowercase_ascii s with - | "true" | "yes" -> + | "true" | "t" | "yes" | "y" | "1" -> true - | "false" | "no" -> + | "false" | "f" | "no" | "n" | "0" -> false | _ -> - raise (Record_failure ("Expected 'true','yes','false','no', got " ^ s)) + record_failure + "Expected 'true','t','yes','y','1','false','f','no','n','0' got %s" s let sdn_protocol_of_string s = match String.lowercase_ascii s with @@ -959,7 +988,7 @@ let sdn_protocol_of_string s = | "pssl" -> `pssl | _ -> - raise (Record_failure ("Expected 'ssl','pssl', got " ^ s)) + record_failure "Expected 'ssl','pssl', got %s" s let sdn_protocol_to_string = function `ssl -> "ssl" | `pssl -> "pssl" @@ -970,7 +999,7 @@ let tunnel_protocol_of_string s = | "vxlan" -> `vxlan | _ -> - raise (Record_failure ("Expected 'gre','vxlan', got " ^ s)) + record_failure "Expected 'gre','vxlan', got %s" s let tunnel_protocol_to_string = function `gre -> "gre" | `vxlan -> "vxlan" @@ -1000,14 +1029,6 @@ let network_sriov_configuration_mode_to_string = function | `unknown -> "unknown" -(* string_to_string_map_to_string *) -let s2sm_to_string sep x = - String.concat sep (List.map (fun (a, b) -> a ^ ": " ^ b) x) - -(* string to blob ref map to string *) -let s2brm_to_string get_uuid_from_ref sep x = - String.concat sep (List.map (fun (n, r) -> n ^ ": " ^ get_uuid_from_ref r) x) - let on_boot_to_string onboot = match onboot with `reset -> "reset" | `persist -> "persist" @@ -1043,119 +1064,42 @@ let domain_type_of_string x = | "pvh" -> `pvh | s -> - raise (Record_failure ("Invalid domain type. Got " ^ s)) + record_failure "Invalid domain type. 
Got %s" s let vtpm_operation_to_string (op : API.vtpm_operations) = match op with `destroy -> "destroy" -(** Parse a string which might have a units suffix on the end *) -let bytes_of_string field x = +(** parse [0-9]*(b|bytes|kib|mib|gib|tib)* to bytes *) +let bytes_of_string str = let ( ** ) a b = Int64.mul a b in - let max_size_TiB = - Int64.div Int64.max_int (1024L ** 1024L ** 1024L ** 1024L) - in - (* detect big number that cannot be represented by Int64. *) - let int64_of_string s = - try Int64.of_string s - with _ -> - if s = "" then - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer (possibly \ - with suffix)" - field - ) - ) ; - let alldigit = ref true and i = ref (String.length s - 1) in - while !alldigit && !i > 0 do - alldigit := Astring.Char.Ascii.is_digit s.[!i] ; - decr i - done ; - if !alldigit then - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': number too big (maximum = %Ld TiB)" - field max_size_TiB - ) - ) - else - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer (possibly \ - with suffix)" - field - ) - ) - in - match - Astring.( - String.fields ~empty:false ~is_sep:(fun c -> - Char.Ascii.(is_white c || is_digit c) - ) - ) - x - with - | [] -> - (* no suffix on the end *) - int64_of_string x - | [suffix] -> - let number = - match - Astring.( - String.fields ~empty:false ~is_sep:(Fun.negate Char.Ascii.is_digit) - ) - x - with - | [number] -> - int64_of_string number - | _ -> - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer \ - (possibly with suffix)" - field - ) - ) - in - let multiplier = - match suffix with - | "bytes" -> - 1L - | "KiB" -> - 1024L - | "MiB" -> - 1024L ** 1024L - | "GiB" -> - 1024L ** 1024L ** 1024L - | "TiB" -> - 1024L ** 1024L ** 1024L ** 1024L - | x -> - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': Unknown suffix: '%s' (try \ - KiB, 
MiB, GiB or TiB)" - field x - ) - ) - in - (* FIXME: detect overflow *) - number ** multiplier - | _ -> - raise - (Record_failure - (Printf.sprintf - "Failed to parse field '%s': expecting an integer (possibly with \ - suffix)" - field - ) - ) + let invalid msg = raise (Invalid_argument msg) in + try + Scanf.sscanf str "%Ld %s" @@ fun size suffix -> + match String.lowercase_ascii suffix with + | _ when size < 0L -> + invalid str + | "bytes" | "b" | "" -> + size + | "kib" | "kb" | "k" -> + size ** 1024L + | "mib" | "mb" | "m" -> + size ** 1024L ** 1024L + | "gib" | "gb" | "g" -> + size ** 1024L ** 1024L ** 1024L + | "tib" | "tb" | "t" -> + size ** 1024L ** 1024L ** 1024L ** 1024L + | _ -> + invalid suffix + with _ -> invalid str -(* Vincent's random mac utils *) +(** Parse a string which might have a units suffix on the end *) +let bytes_of_string field x = + try bytes_of_string x + with Invalid_argument _ -> + record_failure + "Failed to parse field '%s': expecting an integer (possibly with suffix \ + KiB, MiB, GiB, TiB), got '%s'" + field x let mac_from_int_array macs = (* make sure bit 1 (local) is set and bit 0 (unicast) is clear *) @@ -1179,4 +1123,21 @@ let update_sync_frequency_of_string s = | "weekly" -> `weekly | _ -> - raise (Record_failure ("Expected 'daily', 'weekly', got " ^ s)) + record_failure "Expected 'daily', 'weekly', got %s" s + +let vm_placement_policy_to_string = function + | `normal -> + "normal" + | `anti_affinity -> + "anti-affinity" + +let vm_placement_policy_of_string a = + match String.lowercase_ascii a with + | "normal" -> + `normal + | "anti-affinity" -> + `anti_affinity + | s -> + record_failure "Invalid VM placement policy, got %s" s + +let repo_origin_to_string = function `remote -> "remote" | `bundle -> "bundle" diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 3ed5c2d7351..38c3bd82a87 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ 
b/ocaml/tests/record_util/test_record_util.ml @@ -94,6 +94,12 @@ let tests = (O.power_state_to_string, N.power_state_to_string) ; mk __LINE__ None all_vm_operations (O.vm_operation_to_string, N.vm_operation_to_string) + ; mk __LINE__ None all_vm_secureboot_readiness + (O.vm_secureboot_readiness_to_string, N.vm_secureboot_readiness_to_string) + ; mk __LINE__ None all_pool_guest_secureboot_readiness + ( O.pool_guest_secureboot_readiness_to_string + , N.pool_guest_secureboot_readiness_to_string + ) ; mk __LINE__ None all_pool_allowed_operations (O.pool_operation_to_string, N.pool_operation_to_string) ; mk __LINE__ None all_host_allowed_operations @@ -157,7 +163,7 @@ let tests = ( O.host_numa_affinity_policy_to_string , N.host_numa_affinity_policy_to_string ) - ; mk __LINE__ None all_pgpu_dom0_access + ; mk __LINE__ None all_pci_dom0_access (O.pci_dom0_access_to_string, N.pci_dom0_access_to_string) ; mk __LINE__ None all_vbd_mode (O.vbd_mode_to_string, N.vbd_mode_to_string) (*; mk __LINE__ None all_power (O.power_to_string, N.power_to_string)*) @@ -245,6 +251,12 @@ let tests = ) all_update_sync_frequency (O.update_sync_frequency_to_string, N.update_sync_frequency_to_string) + ; mk __LINE__ + (Some (O.vm_placement_policy_of_string, N.vm_placement_policy_of_string)) + all_placement_policy + (O.vm_placement_policy_to_string, N.vm_placement_policy_to_string) + ; mk __LINE__ None all_origin + (O.repo_origin_to_string, N.repo_origin_to_string) ] |> List.concat From 42f84e606b3dc5214e544da82f3f522626fd07c5 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 15:25:14 +0100 Subject: [PATCH 233/341] IH-689: record_util tests should accept a more lenient new of_string Before, tests would expect a new version of record_util to throw an exception where the old version did. We are not harming anyone by instead starting to be case-insensitive for new of_string functions - this only expands the accepted set. 
Also reformat the autogeneration script to be more readable. Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 39 ++++++++++++++++----- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 38c3bd82a87..a803ef7c691 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -40,22 +40,37 @@ let exn_to_string_strip e = *) e |> Printexc.to_string |> drop_module_prefix |> drop_exn_arguments -let exn_equal_strip a b = - String.equal (exn_to_string_strip a) (exn_to_string_strip b) - -let exn = V1.testable (Fmt.of_to_string exn_to_string_strip) exn_equal_strip - let test_of_string ~name all_enum old_to_string of_string_opt = + let exn_equal_strip a b = + String.equal (exn_to_string_strip a) (exn_to_string_strip b) + in + (* New function is allowed to be more lenient and accept more cases *) + let custom_eq expected actual = + match (expected, actual) with + | Error _, Ok _ -> + true + | Error a, Error b -> + exn_equal_strip a b + | a, b -> + a = b + in of_string_opt |> Option.map (fun (old_of_string, new_of_string) -> let make input = V1.test_case input `Quick @@ fun () -> let expected = wrap old_of_string input in let actual = wrap new_of_string input in - let pp_enum = Fmt.of_to_string old_to_string in + let pp_enum_result = + Fmt.of_to_string (function + | Ok a -> + old_to_string a + | Error b -> + exn_to_string_strip b + ) + in V1.( check' ~msg:"compatible" ~expected ~actual - @@ result (testable pp_enum ( = )) exn + @@ testable pp_enum_result custom_eq ) in ( name ^ "of_string" @@ -81,7 +96,15 @@ let mk line of_string_opt all_enum (old_to_string, new_to_string) = (* Created by: ``` -grep 'let.*to_string' old_record_util.ml | sed -re 's/^let ([^ ]+)_to_string.*/\1/' | while read ENUM; do if grep "${ENUM}_of_string" old_record_util.ml >/dev/null; then echo "; mk __LINE__ (Some 
(O.${ENUM}_of_string, N.${ENUM}_of_string)) all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; else echo "; mk __LINE__ None all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; fi; done +grep 'let.*to_string' old_record_util.ml | \ +sed -re 's/^let ([^ ]+)_to_string.*/\1/' | \ +while read ENUM; do + if grep "${ENUM}_of_string" old_record_util.ml >/dev/null; then + echo "; mk __LINE__ (Some (O.${ENUM}_of_string, N.${ENUM}_of_string)) all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; + else + echo "; mk __LINE__ None all_${ENUM} (O.${ENUM}_to_string, N.${ENUM}_to_string)"; + fi; +done ``` and then tweaked to compile using LSP hints where the names were not consistent (e.g. singular vs plural, etc.) *) From bc0912cd550cbe2916f20464aed03b9512949be0 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 15:34:57 +0100 Subject: [PATCH 234/341] IH-689: Make old record_util exception behaviour consistent with the new one Tests expect not only an exception thrown in the new record_util where it was thrown in the old one, but for it to be the same type. Corrects an outlier in the old code that returned something other than Record_failure. The only usage in xapi_message.ml was guarded by a wildcard try X with _ -> , so this does not break anything. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/old_record_util.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/tests/record_util/old_record_util.ml b/ocaml/tests/record_util/old_record_util.ml index 2c98955fffd..c854f27f5aa 100644 --- a/ocaml/tests/record_util/old_record_util.ml +++ b/ocaml/tests/record_util/old_record_util.ml @@ -72,7 +72,7 @@ let string_to_class str = | "Certificate" -> `Certificate | _ -> - failwith "Bad type" + record_failure "Bad type" let power_state_to_string state = match state with From cd7f9ed14158176b4aeecebafecd40633102774f Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 21 Aug 2024 11:09:53 +0100 Subject: [PATCH 235/341] IH-689: Autogenerate a new record_utils.ml Does not include it anywhere yet - this is just the autogeneration code. For every datamodel enum, generates to_string and of_string functions. to_string is straightforward, but of_string needs to: 1) lowercase its input to accept a wider array of cases 2) throw an exception when nothing is matched, specifying possible values. 
Signed-off-by: Andrii Sultanov --- ocaml/idl/ocaml_backend/gen_api.ml | 32 +++++++++++++++++++++++++ ocaml/idl/ocaml_backend/gen_api_main.ml | 14 ++++++++++- ocaml/idl/ocaml_backend/ocaml_utils.ml | 13 ++++++++++ ocaml/xapi-cli-server/dune | 10 ++++++++ 4 files changed, 68 insertions(+), 1 deletion(-) diff --git a/ocaml/idl/ocaml_backend/gen_api.ml b/ocaml/idl/ocaml_backend/gen_api.ml index 564121ab819..90fb78d39dc 100644 --- a/ocaml/idl/ocaml_backend/gen_api.ml +++ b/ocaml/idl/ocaml_backend/gen_api.ml @@ -354,6 +354,38 @@ let toposort_types highapi types = assert (List.sort compare result = List.sort compare types) ; result +let gen_record_deserialization highapi = + let gen_of_to_string types = + let gen_string_and_all = function + | DT.Set (DT.Enum (_, elist) as e) -> + let nlist = List.map fst elist in + [ + (Printf.sprintf "let %s_of_string str = %s") + (OU.alias_of_ty e) + (OU.ocaml_of_string_of_enum nlist) + ; (Printf.sprintf "let %s_to_string = %s") + (OU.alias_of_ty e) + (OU.ocaml_to_string_of_enum nlist) + ] + | _ -> + [] + in + List.concat_map gen_string_and_all types + in + let all_types = all_types_of highapi in + let all_types = add_set_enums all_types in + List.iter (List.iter print) + (between [""] + [ + [ + "exception Record_failure of string" + ; "let record_failure fmt =" + ; "Printf.ksprintf (fun msg -> raise (Record_failure msg)) fmt" + ] + ; gen_of_to_string all_types + ] + ) + let gen_client_types highapi = let all_types = all_types_of highapi in let all_types = add_set_enums all_types in diff --git a/ocaml/idl/ocaml_backend/gen_api_main.ml b/ocaml/idl/ocaml_backend/gen_api_main.ml index 4765e498278..41ffde51a8f 100644 --- a/ocaml/idl/ocaml_backend/gen_api_main.ml +++ b/ocaml/idl/ocaml_backend/gen_api_main.ml @@ -73,7 +73,17 @@ let _ = [ ( "-mode" , Arg.Symbol - ( ["client"; "server"; "api"; "db"; "actions"; "sql"; "rbac"; "test"] + ( [ + "client" + ; "server" + ; "api" + ; "utils" + ; "db" + ; "actions" + ; "sql" + ; "rbac" + ; "test" + ] , 
fun x -> mode := Some x ) , "Choose which file to output" @@ -114,6 +124,8 @@ let _ = Gen_api.gen_client api | Some "api" -> Gen_api.gen_client_types api + | Some "utils" -> + Gen_api.gen_record_deserialization api | Some "server" -> Gen_api.gen_server api | Some "db" -> diff --git a/ocaml/idl/ocaml_backend/ocaml_utils.ml b/ocaml/idl/ocaml_backend/ocaml_utils.ml index a01ae955586..3a6436c67f4 100644 --- a/ocaml/idl/ocaml_backend/ocaml_utils.ml +++ b/ocaml/idl/ocaml_backend/ocaml_utils.ml @@ -101,6 +101,19 @@ let ocaml_to_string_of_enum list = let single name = Printf.sprintf {|%s -> "%s"|} (constructor_of name) name in Printf.sprintf "function %s" (ocaml_map_enum_ " | " single list) +(** Create the body of an of_string function for an enum *) +let ocaml_of_string_of_enum list = + let single name = + Printf.sprintf {|"%s" -> %s|} + (String.lowercase_ascii name) + (constructor_of name) + in + let quoted name = Printf.sprintf {|'%s'|} name in + Printf.sprintf + {|match String.lowercase_ascii str with %s | s -> record_failure "Expected one of %s, got %%s" s|} + (ocaml_map_enum_ " | " single list) + (ocaml_map_enum_ ", " quoted list) + (** Convert an IDL type into a string containing OCaml code representing the type. 
*) let rec ocaml_of_ty = function diff --git a/ocaml/xapi-cli-server/dune b/ocaml/xapi-cli-server/dune index ff3efb6c7b0..8d7cacd6ad8 100644 --- a/ocaml/xapi-cli-server/dune +++ b/ocaml/xapi-cli-server/dune @@ -1,3 +1,13 @@ +(rule + (targets generated_record_utils.ml) + (deps + ../idl/ocaml_backend/gen_api_main.exe + ) + (action + (run %{deps} -filterinternal true -filter closed -mode utils -output + %{targets})) +) + (library (name xapi_cli_server) (modes best) From 09c73a4e79daad2ed8c00475c9ead99b12ea7591 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 13:52:00 +0100 Subject: [PATCH 236/341] IH-689: Include auto-generated record_util Signed-off-by: Andrii Sultanov --- ocaml/xapi-cli-server/record_util.ml | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 2c98955fffd..fa6655b8495 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -12,11 +12,19 @@ * GNU Lesser General Public License for more details. *) (* conversion utils *) - -exception Record_failure of string - -let record_failure fmt = - Printf.ksprintf (fun msg -> raise (Record_failure msg)) fmt +(* NOTE: Unless conversion requires some custom logic, no new functions should + be added here. Automatically-generated functions with consistent behaviour + and naming are generated from the datamodel and included here. + If the custom logic is required, these functions should be shadowed and + justified here. + See: + _build/default/ocaml/xapi-cli-server/generated_record_utils.ml + for the generated code. And: + ~/xen-api/ocaml/idl/ocaml_backend/gen_api.ml + for the code generating it. 
+*) + +include Generated_record_utils let to_str = function Rpc.String x -> x | _ -> failwith "Invalid" From 8a4029ddd10d4700d1091e931c177a74f4356af0 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 15:09:56 +0100 Subject: [PATCH 237/341] IH-689: record_util.ml - remove manually-specified vm_power_state functions Even though vm_power_state is defined in the datamodel, the (pre-git) conversion function handles two additional variants not defined there. One usage site came to depend on this explicitly through its interface, the rest were switched to the stricter automatically-generated version. Adds a test to handle both the stricter and the non-datamodel conversion functions. record_util.ml had two practically identical versions of the conversion function, keeps only one of them, since some external code relies on its output to be lowercase. Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 4 ++- ocaml/xapi-cli-server/record_util.ml | 33 +++------------------ ocaml/xapi-cli-server/records.ml | 5 +++- ocaml/xapi/import.ml | 4 +-- ocaml/xapi/xapi_vbd.ml | 10 +++++-- ocaml/xapi/xapi_vbd_helpers.ml | 14 ++++++--- ocaml/xapi/xapi_vif_helpers.ml | 4 +-- ocaml/xapi/xapi_vm.ml | 12 ++++---- ocaml/xapi/xapi_vm_clone.ml | 4 ++- ocaml/xapi/xapi_vm_clone.mli | 3 +- ocaml/xapi/xapi_vm_lifecycle.ml | 15 ++++++---- ocaml/xapi/xapi_vm_migrate.ml | 4 +-- ocaml/xapi/xapi_vusb_helpers.ml | 4 +-- ocaml/xapi/xapi_xenops.ml | 8 ++--- 14 files changed, 60 insertions(+), 64 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index a803ef7c691..4e107519965 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -114,7 +114,9 @@ let tests = (O.certificate_type_to_string, N.certificate_type_to_string) ; mk __LINE__ None all_cls (O.class_to_string, N.class_to_string) ; mk __LINE__ None all_vm_power_state - 
(O.power_state_to_string, N.power_state_to_string) + (O.power_state_to_string, N.vm_power_state_to_string) + ; mk __LINE__ None all_vm_power_state + (O.power_to_string, N.vm_power_state_to_lowercase_string) ; mk __LINE__ None all_vm_operations (O.vm_operation_to_string, N.vm_operation_to_string) ; mk __LINE__ None all_vm_secureboot_readiness diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index fa6655b8495..0b0f5313160 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -82,21 +82,6 @@ let string_to_class str = | _ -> failwith "Bad type" -let power_state_to_string state = - match state with - | `Halted -> - "Halted" - | `Paused -> - "Paused" - | `Running -> - "Running" - | `Suspended -> - "Suspended" - | `ShuttingDown -> - "Shutting down" - | `Migrating -> - "Migrating" - let vm_operation_table = [ (`assert_operation_valid, "assertoperationvalid") @@ -801,20 +786,10 @@ let string_to_vbd_type s = | _ -> record_failure "Expected 'CD' or 'Disk', got %s" s -let power_to_string h = - match h with - | `Halted -> - "halted" - | `Paused -> - "paused" - | `Running -> - "running" - | `Suspended -> - "suspended" - | `ShuttingDown -> - "shutting down" - | `Migrating -> - "migrating" +(* Some usage sites rely on the output of the + conversion function to be lowercase*) +let vm_power_state_to_lowercase_string h = + vm_power_state_to_string h |> String.uncapitalize_ascii let vdi_type_to_string t = match t with diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 426b04b758b..c426697bf5d 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -1860,7 +1860,10 @@ let vm_record rpc session_id vm = ~get:(fun () -> string_of_bool (x ()).API.vM_is_control_domain) () ; make_field ~name:"power-state" - ~get:(fun () -> Record_util.power_to_string (x ()).API.vM_power_state) + ~get:(fun () -> + Record_util.vm_power_state_to_lowercase_string + 
(x ()).API.vM_power_state + ) () ; make_field ~name:"memory-actual" ~get:(fun () -> diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml index bc9d3e1db0b..e8ccd86351d 100644 --- a/ocaml/xapi/import.ml +++ b/ocaml/xapi/import.ml @@ -468,8 +468,8 @@ module VM : HandlerTools = struct ( Api_errors.vm_bad_power_state , [ Ref.string_of vm - ; Record_util.power_state_to_string `Halted - ; Record_util.power_state_to_string power_state + ; Record_util.vm_power_state_to_string `Halted + ; Record_util.vm_power_state_to_string power_state ] ) ) diff --git a/ocaml/xapi/xapi_vbd.ml b/ocaml/xapi/xapi_vbd.ml index 5e1b31c5bee..4284523e2ba 100644 --- a/ocaml/xapi/xapi_vbd.ml +++ b/ocaml/xapi/xapi_vbd.ml @@ -310,10 +310,16 @@ let assert_not_suspended ~__context ~vm = if Db.VM.get_power_state ~__context ~self:vm = `Suspended then let expected = String.concat ", " - (List.map Record_util.power_to_string [`Halted; `Running]) + (List.map Record_util.vm_power_state_to_lowercase_string + [`Halted; `Running] + ) in let error_params = - [Ref.string_of vm; expected; Record_util.power_to_string `Suspended] + [ + Ref.string_of vm + ; expected + ; Record_util.vm_power_state_to_lowercase_string `Suspended + ] in raise (Api_errors.Server_error (Api_errors.vm_bad_power_state, error_params)) diff --git a/ocaml/xapi/xapi_vbd_helpers.ml b/ocaml/xapi/xapi_vbd_helpers.ml index 94471108e41..690bccc390f 100644 --- a/ocaml/xapi/xapi_vbd_helpers.ml +++ b/ocaml/xapi/xapi_vbd_helpers.ml @@ -122,8 +122,8 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = set_errors Api_errors.device_already_detached [_ref] [`unplug; `unplug_force] | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in + let expected = Record_util.vm_power_state_to_lowercase_string `Running in (* If not Running, always block these operations: *) let bad_ops = 
[`plug; `unplug; `unplug_force] in (* However allow VBD pause and unpause if the VM is paused: *) @@ -199,10 +199,16 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = ( if record.Db_actions.vBD_type = `CD && power_state = `Suspended then let expected = String.concat ", " - (List.map Record_util.power_to_string [`Halted; `Running]) + (List.map Record_util.vm_power_state_to_lowercase_string + [`Halted; `Running] + ) in let error_params = - [Ref.string_of vm; expected; Record_util.power_to_string `Suspended] + [ + Ref.string_of vm + ; expected + ; Record_util.vm_power_state_to_lowercase_string `Suspended + ] in set_errors Api_errors.vm_bad_power_state error_params [`insert; `eject] (* `attach required for resume *) diff --git a/ocaml/xapi/xapi_vif_helpers.ml b/ocaml/xapi/xapi_vif_helpers.ml index 5b1f1f458f5..adafc6d888f 100644 --- a/ocaml/xapi/xapi_vif_helpers.ml +++ b/ocaml/xapi/xapi_vif_helpers.ml @@ -85,8 +85,8 @@ let valid_operations ~__context record _ref' : table = | `Running, false -> set_errors Api_errors.device_already_detached [_ref] [`unplug] | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in + let expected = Record_util.vm_power_state_to_lowercase_string `Running in set_errors Api_errors.vm_bad_power_state [Ref.string_of vm; expected; actual] [`plug; `unplug] diff --git a/ocaml/xapi/xapi_vm.ml b/ocaml/xapi/xapi_vm.ml index eff46f84b93..cb5f616d323 100644 --- a/ocaml/xapi/xapi_vm.ml +++ b/ocaml/xapi/xapi_vm.ml @@ -411,8 +411,8 @@ let hard_reboot ~__context ~vm = ( Api_errors.vm_bad_power_state , [ Ref.string_of vm - ; Record_util.power_to_string `Running - ; Record_util.power_to_string `Suspended + ; Record_util.vm_power_state_to_lowercase_string `Running + ; Record_util.vm_power_state_to_lowercase_string `Suspended ] ) ) @@ -643,8 +643,8 @@ let create ~__context ~name_label 
~name_description ~power_state ~user_version ( vm_bad_power_state , [ Ref.string_of vm_ref - ; Record_util.power_to_string `Halted - ; Record_util.power_to_string power_state + ; Record_util.vm_power_state_to_lowercase_string `Halted + ; Record_util.vm_power_state_to_lowercase_string power_state ] ) ) ; @@ -1627,8 +1627,8 @@ let restart_device_models ~__context ~self = ( vm_bad_power_state , [ Ref.string_of self - ; Record_util.power_state_to_string `Running - ; Record_util.power_state_to_string power_state + ; Record_util.vm_power_state_to_string `Running + ; Record_util.vm_power_state_to_string power_state ] ) ) ; diff --git a/ocaml/xapi/xapi_vm_clone.ml b/ocaml/xapi/xapi_vm_clone.ml index 997dc5cfdb4..46c1e310ac2 100644 --- a/ocaml/xapi/xapi_vm_clone.ml +++ b/ocaml/xapi/xapi_vm_clone.ml @@ -231,7 +231,9 @@ let quiesced = "quiesced" let snapshot_info ~power_state ~is_a_snapshot = let power_state_info = - [(power_state_at_snapshot, Record_util.power_state_to_string power_state)] + [ + (power_state_at_snapshot, Record_util.vm_power_state_to_string power_state) + ] in if is_a_snapshot then (disk_snapshot_type, crash_consistent) :: power_state_info diff --git a/ocaml/xapi/xapi_vm_clone.mli b/ocaml/xapi/xapi_vm_clone.mli index 7105a98106a..05843952fca 100644 --- a/ocaml/xapi/xapi_vm_clone.mli +++ b/ocaml/xapi/xapi_vm_clone.mli @@ -24,8 +24,7 @@ val disk_snapshot_type : string val quiesced : string val snapshot_info : - power_state: - [< `Halted | `Migrating | `Paused | `Running | `ShuttingDown | `Suspended] + power_state:[< `Halted | `Paused | `Running | `Suspended] -> is_a_snapshot:bool -> (string * string) list diff --git a/ocaml/xapi/xapi_vm_lifecycle.ml b/ocaml/xapi/xapi_vm_lifecycle.ml index 2f6130641df..914cfd15e8a 100644 --- a/ocaml/xapi/xapi_vm_lifecycle.ml +++ b/ocaml/xapi/xapi_vm_lifecycle.ml @@ -262,9 +262,10 @@ let check_snapshot ~vmr:_ ~op ~ref_str = let report_power_state_error ~__context ~vmr ~power_state ~op ~ref_str = let expected = 
allowed_power_states ~__context ~vmr ~op in let expected = - String.concat ", " (List.map Record_util.power_to_string expected) + String.concat ", " + (List.map Record_util.vm_power_state_to_lowercase_string expected) in - let actual = Record_util.power_to_string power_state in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in Some (Api_errors.vm_bad_power_state, [ref_str; expected; actual]) let report_concurrent_operations_error ~current_ops ~ref_str = @@ -972,8 +973,9 @@ let assert_initial_power_state_in ~__context ~self ~allowed = ( Api_errors.vm_bad_power_state , [ Ref.string_of self - ; List.map Record_util.power_to_string allowed |> String.concat ";" - ; Record_util.power_to_string actual + ; List.map Record_util.vm_power_state_to_lowercase_string allowed + |> String.concat ";" + ; Record_util.vm_power_state_to_lowercase_string actual ] ) ) @@ -992,8 +994,9 @@ let assert_final_power_state_in ~__context ~self ~allowed = , [ "VM not in expected power state after completing operation" ; Ref.string_of self - ; List.map Record_util.power_to_string allowed |> String.concat ";" - ; Record_util.power_to_string actual + ; List.map Record_util.vm_power_state_to_lowercase_string allowed + |> String.concat ";" + ; Record_util.vm_power_state_to_lowercase_string actual ] ) ) diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index e57ef22fbad..8208cf89880 100644 --- a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -1805,8 +1805,8 @@ let assert_can_migrate ~__context ~vm ~dest ~live:_ ~vdi_map ~vif_map ~options ( Api_errors.vm_bad_power_state , [ Ref.string_of vm - ; Record_util.power_to_string `Halted - ; Record_util.power_to_string power_state + ; Record_util.vm_power_state_to_lowercase_string `Halted + ; Record_util.vm_power_state_to_lowercase_string power_state ] ) ) ; diff --git a/ocaml/xapi/xapi_vusb_helpers.ml b/ocaml/xapi/xapi_vusb_helpers.ml index 4c8b8d5eb2a..05a4024c9a6 100644 --- 
a/ocaml/xapi/xapi_vusb_helpers.ml +++ b/ocaml/xapi/xapi_vusb_helpers.ml @@ -69,8 +69,8 @@ let valid_operations ~__context record _ref' : table = | `Running, false -> set_errors Api_errors.device_already_detached [_ref] [`unplug] | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in + let actual = Record_util.vm_power_state_to_lowercase_string power_state in + let expected = Record_util.vm_power_state_to_lowercase_string `Running in set_errors Api_errors.vm_bad_power_state [Ref.string_of vm; expected; actual] [`plug; `unplug] diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index dfb2b666205..48c51740c77 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -44,8 +44,8 @@ let check_power_state_is ~__context ~self ~expected = if actual <> expected then warn "Potential problem: VM %s in power state '%s' when expecting '%s'" (Db.VM.get_uuid ~__context ~self) - (Record_util.power_to_string actual) - (Record_util.power_to_string expected) + (Record_util.vm_power_state_to_lowercase_string actual) + (Record_util.vm_power_state_to_lowercase_string expected) let event_wait queue_name dbg ?from p = let finished = ref false in @@ -2047,7 +2047,7 @@ let update_vm ~__context id = changed." ; should_update_allowed_operations := true ; debug "xenopsd event: Updating VM %s power_state <- %s" id - (Record_util.power_state_to_string power_state) ; + (Record_util.vm_power_state_to_string power_state) ; (* This will mark VBDs, VIFs as detached and clear resident_on if the VM has permanently shutdown. 
current-operations should not be reset as there maybe a checkpoint is ongoing*) @@ -3426,7 +3426,7 @@ let transform_xenops_exn ~__context ~vm queue_name f = | Bad_power_state (found, expected) -> let f x = xenapi_of_xenops_power_state (Some x) - |> Record_util.power_state_to_string + |> Record_util.vm_power_state_to_string in let found = f found and expected = f expected in reraise Api_errors.vm_bad_power_state From 94dcead023d3fb6fd7fa40244a35f8bbe708baa0 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 16:08:34 +0100 Subject: [PATCH 238/341] IH-689: record_util.ml - move to autogenerated vdi_operations Renames usage sites where needed. Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 36 --------------------- ocaml/xapi-cli-server/records.ml | 8 ++--- ocaml/xapi/xapi_vbd_helpers.ml | 2 +- ocaml/xapi/xapi_vdi.ml | 4 +-- 5 files changed, 8 insertions(+), 44 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 4e107519965..62f30da3fc2 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -136,7 +136,7 @@ let tests = , N.latest_synced_updates_applied_state_to_string ) ; mk __LINE__ None all_vdi_operations - (O.vdi_operation_to_string, N.vdi_operation_to_string) + (O.vdi_operation_to_string, N.vdi_operations_to_string) ; mk __LINE__ None all_storage_operations (O.sr_operation_to_string, N.sr_operation_to_string) ; mk __LINE__ None all_vbd_operations diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 0b0f5313160..51c2717c630 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -257,42 +257,6 @@ let latest_synced_updates_applied_state_to_string = function | `unknown -> "unknown" -let vdi_operation_to_string : API.vdi_operations -> string = function - | `clone -> - 
"clone" - | `copy -> - "copy" - | `resize -> - "resize" - | `resize_online -> - "resize_online" - | `destroy -> - "destroy" - | `force_unlock -> - "force_unlock" - | `snapshot -> - "snapshot" - | `mirror -> - "mirror" - | `forget -> - "forget" - | `update -> - "update" - | `generate_config -> - "generate_config" - | `enable_cbt -> - "enable_cbt" - | `disable_cbt -> - "disable_cbt" - | `data_destroy -> - "data_destroy" - | `list_changed_blocks -> - "list_changed_blocks" - | `set_on_boot -> - "set_on_boot" - | `blocked -> - "blocked" - let sr_operation_to_string : API.storage_operations -> string = function | `scan -> "scan" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index c426697bf5d..1eeb80ff089 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -3266,23 +3266,23 @@ let vdi_record rpc session_id vdi = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vdi_operation_to_string + map_and_concat Record_util.vdi_operations_to_string (x ()).API.vDI_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vdi_operation_to_string + List.map Record_util.vdi_operations_to_string (x ()).API.vDI_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vdi_operation_to_string b) + (fun (_, b) -> Record_util.vdi_operations_to_string b) (x ()).API.vDI_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vdi_operation_to_string b) + (fun (_, b) -> Record_util.vdi_operations_to_string b) (x ()).API.vDI_current_operations ) () diff --git a/ocaml/xapi/xapi_vbd_helpers.ml b/ocaml/xapi/xapi_vbd_helpers.ml index 690bccc390f..4973fb5c8b7 100644 --- a/ocaml/xapi/xapi_vbd_helpers.ml +++ b/ocaml/xapi/xapi_vbd_helpers.ml @@ -235,7 +235,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = snd (List.hd vdi_record.Db_actions.vDI_current_operations) in 
set_errors Api_errors.other_operation_in_progress - ["VDI"; Ref.string_of vdi; vdi_operation_to_string concurrent_op] + ["VDI"; Ref.string_of vdi; vdi_operations_to_string concurrent_op] [`attach; `plug; `insert] ) ; if diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index f2f1ed12688..ab8c543a36a 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -247,7 +247,7 @@ let check_operation_error ~__context ?sr_records:_ ?(pbd_records = []) if blocked_by_attach then Some ( Api_errors.vdi_in_use - , [_ref; Record_util.vdi_operation_to_string op] + , [_ref; Record_util.vdi_operations_to_string op] ) else if (* data_destroy first waits for all the VBDs to disappear in its @@ -961,7 +961,7 @@ let wait_for_vbds_to_be_unplugged_and_destroyed ~__context ~self ~timeout = ( Api_errors.vdi_in_use , [ Ref.string_of self - ; Record_util.vdi_operation_to_string `data_destroy + ; Record_util.vdi_operations_to_string `data_destroy ] ) ) From 971119a449abeea71d6d5bc09c5c24a1214af891 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 16:13:55 +0100 Subject: [PATCH 239/341] IH-689: record_util.ml - move to autogenerated sdn_controller_protocol Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 4 ++-- ocaml/xapi-cli-server/cli_operations.ml | 3 ++- ocaml/xapi-cli-server/record_util.ml | 11 ----------- ocaml/xapi-cli-server/records.ml | 2 +- ocaml/xapi/message_forwarding.ml | 2 +- 5 files changed, 6 insertions(+), 16 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 62f30da3fc2..e748d612a35 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -246,9 +246,9 @@ let tests = ; mk __LINE__ None all_cluster_host_operation (O.cluster_host_operation_to_string, N.cluster_host_operation_to_string) ; mk __LINE__ - (Some (O.sdn_protocol_of_string, N.sdn_protocol_of_string)) + (Some (O.sdn_protocol_of_string, N.sdn_controller_protocol_of_string)) all_sdn_controller_protocol - (O.sdn_protocol_to_string, N.sdn_protocol_to_string) + (O.sdn_protocol_to_string, N.sdn_controller_protocol_to_string) ; mk __LINE__ (Some (O.tunnel_protocol_of_string, N.tunnel_protocol_of_string)) all_tunnel_protocol diff --git a/ocaml/xapi-cli-server/cli_operations.ml b/ocaml/xapi-cli-server/cli_operations.ml index 7c693a7a25c..f715f70da7c 100644 --- a/ocaml/xapi-cli-server/cli_operations.ml +++ b/ocaml/xapi-cli-server/cli_operations.ml @@ -7753,7 +7753,8 @@ module SDN_controller = struct in let protocol = if List.mem_assoc "protocol" params then - Record_util.sdn_protocol_of_string (List.assoc "protocol" params) + Record_util.sdn_controller_protocol_of_string + (List.assoc "protocol" params) else `ssl in diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 51c2717c630..b4307080eef 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -928,17 +928,6 @@ let bool_of_string s = record_failure "Expected 'true','t','yes','y','1','false','f','no','n','0' got %s" s -let sdn_protocol_of_string s = - match 
String.lowercase_ascii s with - | "ssl" -> - `ssl - | "pssl" -> - `pssl - | _ -> - record_failure "Expected 'ssl','pssl', got %s" s - -let sdn_protocol_to_string = function `ssl -> "ssl" | `pssl -> "pssl" - let tunnel_protocol_of_string s = match String.lowercase_ascii s with | "gre" -> diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 1eeb80ff089..c094186429e 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -4730,7 +4730,7 @@ let sdn_controller_record rpc session_id sdn_controller = () ; make_field ~name:"protocol" ~get:(fun () -> - Record_util.sdn_protocol_to_string + Record_util.sdn_controller_protocol_to_string (x ()).API.sDN_controller_protocol ) () diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index ce6e69ef54e..a1c900f212c 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -6200,7 +6200,7 @@ functor module SDN_controller = struct let introduce ~__context ~protocol ~address ~port = info "SDN_controller.introduce: protocol='%s', address='%s', port='%Ld'" - (Record_util.sdn_protocol_to_string protocol) + (Record_util.sdn_controller_protocol_to_string protocol) address port ; Local.SDN_controller.introduce ~__context ~protocol ~address ~port From b4e207212545644ccd27e6931e2bf6d993f29ff7 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 16:20:55 +0100 Subject: [PATCH 240/341] IH-689: record_util.ml - move to autogenerated vusb_operations Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 8 -------- ocaml/xapi-cli-server/records.ml | 8 ++++---- ocaml/xapi/xapi_vusb_helpers.ml | 6 +++--- 4 files changed, 8 insertions(+), 16 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index e748d612a35..61b4f0cb708 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -256,7 +256,7 @@ let tests = ; mk __LINE__ None all_pif_igmp_status (O.pif_igmp_status_to_string, N.pif_igmp_status_to_string) ; mk __LINE__ None all_vusb_operations - (O.vusb_operation_to_string, N.vusb_operation_to_string) + (O.vusb_operation_to_string, N.vusb_operations_to_string) ; mk __LINE__ None all_sriov_configuration_mode ( O.network_sriov_configuration_mode_to_string , N.network_sriov_configuration_mode_to_string diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index b4307080eef..b02eba31fde 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -947,14 +947,6 @@ let pif_igmp_status_to_string = function | `unknown -> "unknown" -let vusb_operation_to_string = function - | `attach -> - "attach" - | `plug -> - "plug" - | `unplug -> - "unplug" - let network_sriov_configuration_mode_to_string = function | `sysfs -> "sysfs" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index c094186429e..ee9f7ae80c4 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -4935,23 +4935,23 @@ let vusb_record rpc session_id vusb = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vusb_operation_to_string + map_and_concat Record_util.vusb_operations_to_string (x ()).API.vUSB_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vusb_operation_to_string + List.map 
Record_util.vusb_operations_to_string (x ()).API.vUSB_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vusb_operation_to_string b) + (fun (_, b) -> Record_util.vusb_operations_to_string b) (x ()).API.vUSB_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vusb_operation_to_string b) + (fun (_, b) -> Record_util.vusb_operations_to_string b) (x ()).API.vUSB_current_operations ) () diff --git a/ocaml/xapi/xapi_vusb_helpers.ml b/ocaml/xapi/xapi_vusb_helpers.ml index 05a4024c9a6..2b9e0805865 100644 --- a/ocaml/xapi/xapi_vusb_helpers.ml +++ b/ocaml/xapi/xapi_vusb_helpers.ml @@ -52,13 +52,13 @@ let valid_operations ~__context record _ref' : table = debug "No operations are valid because current-operations = [ %s ]" (String.concat "; " (List.map - (fun (task, op) -> task ^ " -> " ^ vusb_operation_to_string op) + (fun (task, op) -> task ^ " -> " ^ vusb_operations_to_string op) current_ops ) ) ; let concurrent_op = snd (List.hd current_ops) in set_errors Api_errors.other_operation_in_progress - ["VUSB"; _ref; vusb_operation_to_string concurrent_op] + ["VUSB"; _ref; vusb_operations_to_string concurrent_op] all_ops ) ; let vm = Db.VUSB.get_VM ~__context ~self:_ref' in @@ -101,7 +101,7 @@ let throw_error (table : table) op = Printf.sprintf "xapi_vusb_helpers.assert_operation_valid unknown operation: \ %s" - (vusb_operation_to_string op) + (vusb_operations_to_string op) ] ) ) From 7fe6856cd31e22e6988e0aa05b5f98c25e069342 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 16:24:09 +0100 Subject: [PATCH 241/341] IH-689: record_util.ml - move to autogenerated vbd_operations Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 18 ------------------ ocaml/xapi-cli-server/records.ml | 8 ++++---- ocaml/xapi/xapi_vbd_helpers.ml | 8 ++++---- 4 files changed, 9 insertions(+), 27 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 61b4f0cb708..ac620ebc6a3 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -140,7 +140,7 @@ let tests = ; mk __LINE__ None all_storage_operations (O.sr_operation_to_string, N.sr_operation_to_string) ; mk __LINE__ None all_vbd_operations - (O.vbd_operation_to_string, N.vbd_operation_to_string) + (O.vbd_operation_to_string, N.vbd_operations_to_string) ; mk __LINE__ None all_vif_operations (O.vif_operation_to_string, N.vif_operation_to_string) ; mk __LINE__ None all_vif_locking_mode diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index b02eba31fde..afd021f2227 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -299,24 +299,6 @@ let sr_operation_to_string : API.storage_operations -> string = function | `pbd_destroy -> "PBD.destroy" -let vbd_operation_to_string = function - | `attach -> - "attach" - | `eject -> - "eject" - | `insert -> - "insert" - | `plug -> - "plug" - | `unplug -> - "unplug" - | `unplug_force -> - "unplug_force" - | `pause -> - "pause" - | `unpause -> - "unpause" - let vif_operation_to_string = function | `attach -> "attach" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index ee9f7ae80c4..cf4a6dde80a 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -3446,23 +3446,23 @@ let vbd_record rpc session_id vbd = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vbd_operation_to_string + map_and_concat 
Record_util.vbd_operations_to_string (x ()).API.vBD_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vbd_operation_to_string + List.map Record_util.vbd_operations_to_string (x ()).API.vBD_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vbd_operation_to_string b) + (fun (_, b) -> Record_util.vbd_operations_to_string b) (x ()).API.vBD_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vbd_operation_to_string b) + (fun (_, b) -> Record_util.vbd_operations_to_string b) (x ()).API.vBD_current_operations ) () diff --git a/ocaml/xapi/xapi_vbd_helpers.ml b/ocaml/xapi/xapi_vbd_helpers.ml index 4973fb5c8b7..3794d2c1fb7 100644 --- a/ocaml/xapi/xapi_vbd_helpers.ml +++ b/ocaml/xapi/xapi_vbd_helpers.ml @@ -76,7 +76,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = ( if current_ops <> [] then let concurrent_op = List.hd current_ops in set_errors Api_errors.other_operation_in_progress - ["VBD"; _ref; vbd_operation_to_string concurrent_op] + ["VBD"; _ref; vbd_operations_to_string concurrent_op] (Listext.List.set_difference all_ops safe_to_parallelise) ) ; (* If not all operations are parallisable then preclude pause *) @@ -88,7 +88,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = parallelisable operations too *) if not all_are_parallelisable then set_errors Api_errors.other_operation_in_progress - ["VBD"; _ref; vbd_operation_to_string (List.hd current_ops)] + ["VBD"; _ref; vbd_operations_to_string (List.hd current_ops)] [`pause] ; (* If something other than `pause `unpause *and* `attach (for VM.reboot, see CA-24282) then disallow unpause *) if @@ -96,7 +96,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = <> [] then set_errors Api_errors.other_operation_in_progress - ["VBD"; _ref; vbd_operation_to_string (List.hd current_ops)] + ["VBD"; _ref; 
vbd_operations_to_string (List.hd current_ops)] [`unpause] ; (* Drives marked as not unpluggable cannot be unplugged *) if not record.Db_actions.vBD_unpluggable then @@ -313,7 +313,7 @@ let throw_error (table : table) op = , [ Printf.sprintf "xapi_vbd_helpers.assert_operation_valid unknown operation: %s" - (vbd_operation_to_string op) + (vbd_operations_to_string op) ] ) ) From 9bbfef10af32c3249475900bfa2fcb0a1e0e5fa3 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 16:26:31 +0100 Subject: [PATCH 242/341] IH-689: record_util.ml - move to autogenerated pool_operations Renames usage sites where needed. Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 32 --------------------- ocaml/xapi-cli-server/records.ml | 8 +++--- ocaml/xapi/xapi_pool_helpers.ml | 4 +-- 4 files changed, 7 insertions(+), 39 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index ac620ebc6a3..8f3f6427935 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -126,7 +126,7 @@ let tests = , N.pool_guest_secureboot_readiness_to_string ) ; mk __LINE__ None all_pool_allowed_operations - (O.pool_operation_to_string, N.pool_operation_to_string) + (O.pool_operation_to_string, N.pool_allowed_operations_to_string) ; mk __LINE__ None all_host_allowed_operations (O.host_operation_to_string, N.host_operation_to_string) ; mk __LINE__ None all_update_guidances diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index afd021f2227..238f7dadaa6 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -179,38 +179,6 @@ let pool_guest_secureboot_readiness_to_string = function | `not_ready -> "not_ready" -let pool_operation_to_string = function - | `ha_enable -> - "ha_enable" - | `ha_disable -> - "ha_disable" - | `cluster_create -> 
- "cluster_create" - | `designate_new_master -> - "designate_new_master" - | `tls_verification_enable -> - "tls_verification_enable" - | `configure_repositories -> - "configure_repositories" - | `sync_updates -> - "sync_updates" - | `sync_bundle -> - "sync_bundle" - | `get_updates -> - "get_updates" - | `apply_updates -> - "apply_updates" - | `cert_refresh -> - "cert_refresh" - | `exchange_certificates_on_join -> - "exchange_certificates_on_join" - | `exchange_ca_certificates_on_join -> - "exchange_ca_certificates_on_join" - | `copy_primary_host_certs -> - "copy_primary_host_certs" - | `eject -> - "eject" - let host_operation_to_string = function | `provision -> "provision" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index cf4a6dde80a..e134a77f772 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -1189,23 +1189,23 @@ let pool_record rpc session_id pool = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.pool_operation_to_string + map_and_concat Record_util.pool_allowed_operations_to_string (x ()).API.pool_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.pool_operation_to_string + List.map Record_util.pool_allowed_operations_to_string (x ()).API.pool_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.pool_operation_to_string b) + (fun (_, b) -> Record_util.pool_allowed_operations_to_string b) (x ()).API.pool_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.pool_operation_to_string b) + (fun (_, b) -> Record_util.pool_allowed_operations_to_string b) (x ()).API.pool_current_operations ) () diff --git a/ocaml/xapi/xapi_pool_helpers.ml b/ocaml/xapi/xapi_pool_helpers.ml index 16309c7bd51..ec281ade966 100644 --- a/ocaml/xapi/xapi_pool_helpers.ml +++ b/ocaml/xapi/xapi_pool_helpers.ml @@ -138,7 +138,7 @@ let throw_error table op = Printf.sprintf 
"xapi_pool_helpers.assert_operation_valid unknown operation: \ %s" - (pool_operation_to_string op) + (pool_allowed_operations_to_string op) ] ) ) @@ -202,7 +202,7 @@ let assert_no_pool_ops ~__context = let err = ops |> List.map snd - |> List.map Record_util.pool_operation_to_string + |> List.map Record_util.pool_allowed_operations_to_string |> String.concat "; " |> Printf.sprintf "pool operations in progress: [ %s ]" in From 66c4c10a3625643b95778efa71bd0934db3fba5a Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 16:28:41 +0100 Subject: [PATCH 243/341] IH-689: record_util.ml - move to autogenerated sriov_configuration_mode Renames usage sites where needed. Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 10 ---------- ocaml/xapi/xapi_network_sriov_helpers.ml | 2 +- 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 8f3f6427935..7de67fe089a 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -259,7 +259,7 @@ let tests = (O.vusb_operation_to_string, N.vusb_operations_to_string) ; mk __LINE__ None all_sriov_configuration_mode ( O.network_sriov_configuration_mode_to_string - , N.network_sriov_configuration_mode_to_string + , N.sriov_configuration_mode_to_string ) ; mk __LINE__ None all_on_boot (O.on_boot_to_string, N.on_boot_to_string) ; mk __LINE__ None all_tristate_type diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 238f7dadaa6..90be785ceca 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -897,16 +897,6 @@ let pif_igmp_status_to_string = function | `unknown -> "unknown" -let network_sriov_configuration_mode_to_string = function - | `sysfs -> - "sysfs" - | `modprobe -> - "modprobe" - | `manual -> - "manual" - | 
`unknown -> - "unknown" - let on_boot_to_string onboot = match onboot with `reset -> "reset" | `persist -> "persist" diff --git a/ocaml/xapi/xapi_network_sriov_helpers.ml b/ocaml/xapi/xapi_network_sriov_helpers.ml index 952a7c35270..6600f6a2f44 100644 --- a/ocaml/xapi/xapi_network_sriov_helpers.ml +++ b/ocaml/xapi/xapi_network_sriov_helpers.ml @@ -56,7 +56,7 @@ let sriov_bring_up ~__context ~self = in info "Enable network sriov on PIF %s successful, mode: %s need_reboot: %b" (Ref.string_of physical_pif) - (Record_util.network_sriov_configuration_mode_to_string mode) + (Record_util.sriov_configuration_mode_to_string mode) require_reboot ; Db.Network_sriov.set_configuration_mode ~__context ~self:sriov ~value:mode ; Db.Network_sriov.set_requires_reboot ~__context ~self:sriov From 27f827b3da6e32f3c17e7bc7cc20a1cd97418b93 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 09:03:36 +0100 Subject: [PATCH 244/341] IH-689: record_util.ml - move to autogenerated update_guidances Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 16 ---------------- ocaml/xapi-cli-server/records.ml | 12 ++++++------ 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 7de67fe089a..14e6ca768a1 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -130,7 +130,7 @@ let tests = ; mk __LINE__ None all_host_allowed_operations (O.host_operation_to_string, N.host_operation_to_string) ; mk __LINE__ None all_update_guidances - (O.update_guidance_to_string, N.update_guidance_to_string) + (O.update_guidance_to_string, N.update_guidances_to_string) ; mk __LINE__ None all_latest_synced_updates_applied_state ( O.latest_synced_updates_applied_state_to_string , N.latest_synced_updates_applied_state_to_string diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 90be785ceca..3a76f7f7997 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -201,22 +201,6 @@ let host_operation_to_string = function | `enable -> "enable" -let update_guidance_to_string = function - | `reboot_host -> - "reboot_host" - | `reboot_host_on_livepatch_failure -> - "reboot_host_on_livepatch_failure" - | `reboot_host_on_kernel_livepatch_failure -> - "reboot_host_on_kernel_livepatch_failure" - | `reboot_host_on_xen_livepatch_failure -> - "reboot_host_on_xen_livepatch_failure" - | `restart_toolstack -> - "restart_toolstack" - | `restart_device_model -> - "restart_device_model" - | `restart_vm -> - "restart_vm" - let latest_synced_updates_applied_state_to_string = function | `yes -> "yes" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index e134a77f772..69e2da16c60 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -2547,7 
+2547,7 @@ let vm_record rpc session_id vm = () ; make_field ~name:"pending-guidances" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.vM_pending_guidances ) () @@ -2556,13 +2556,13 @@ let vm_record rpc session_id vm = () ; make_field ~name:"pending-guidances-recommended" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.vM_pending_guidances_recommended ) () ; make_field ~name:"pending-guidances-full" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.vM_pending_guidances_full ) () @@ -3181,7 +3181,7 @@ let host_record rpc session_id host = () ; make_field ~name:"pending-guidances" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.host_pending_guidances ) () @@ -3201,13 +3201,13 @@ let host_record rpc session_id host = () ; make_field ~name:"pending-guidances-recommended" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.host_pending_guidances_recommended ) () ; make_field ~name:"pending-guidances-full" ~get:(fun () -> - map_and_concat Record_util.update_guidance_to_string + map_and_concat Record_util.update_guidances_to_string (x ()).API.host_pending_guidances_full ) () From 687d757a829bfd04a9c332677b49fac042986570 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 14:36:44 +0100 Subject: [PATCH 245/341] IH-689: record_util.ml - move to autogenerated vif_operations Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 10 ---------- ocaml/xapi-cli-server/records.ml | 8 ++++---- ocaml/xapi/message_forwarding.ml | 2 +- ocaml/xapi/xapi_vif_helpers.ml | 6 +++--- 5 files changed, 9 insertions(+), 19 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 14e6ca768a1..8167fad07a1 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -142,7 +142,7 @@ let tests = ; mk __LINE__ None all_vbd_operations (O.vbd_operation_to_string, N.vbd_operations_to_string) ; mk __LINE__ None all_vif_operations - (O.vif_operation_to_string, N.vif_operation_to_string) + (O.vif_operation_to_string, N.vif_operations_to_string) ; mk __LINE__ None all_vif_locking_mode (O.vif_locking_mode_to_string, N.vif_locking_mode_to_string) ; mk __LINE__ None all_vmss_type (O.vmss_type_to_string, N.vmss_type_to_string) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 3a76f7f7997..9b082a2dbc2 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -251,16 +251,6 @@ let sr_operation_to_string : API.storage_operations -> string = function | `pbd_destroy -> "PBD.destroy" -let vif_operation_to_string = function - | `attach -> - "attach" - | `plug -> - "plug" - | `unplug -> - "unplug" - | `unplug_force -> - "unplug_force" - let vif_locking_mode_to_string = function | `network_default -> "network_default" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 69e2da16c60..632678b271f 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -816,23 +816,23 @@ let vif_record rpc session_id vif = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vif_operation_to_string + map_and_concat 
Record_util.vif_operations_to_string (x ()).API.vIF_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vif_operation_to_string + List.map Record_util.vif_operations_to_string (x ()).API.vIF_allowed_operations ) () ; make_field ~name:"current-operations" ~get:(fun () -> map_and_concat - (fun (_, b) -> Record_util.vif_operation_to_string b) + (fun (_, b) -> Record_util.vif_operations_to_string b) (x ()).API.vIF_current_operations ) ~get_set:(fun () -> List.map - (fun (_, b) -> Record_util.vif_operation_to_string b) + (fun (_, b) -> Record_util.vif_operations_to_string b) (x ()).API.vIF_current_operations ) () diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index a1c900f212c..4d454ecd338 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -4364,7 +4364,7 @@ functor let unplug_common ~__context ~self ~force = let op = `unplug in - let name = "VIF." ^ Record_util.vif_operation_to_string op in + let name = "VIF." 
^ Record_util.vif_operations_to_string op in info "%s: VIF = '%s'" name (vif_uuid ~__context self) ; let local_fn, remote_fn = if force then diff --git a/ocaml/xapi/xapi_vif_helpers.ml b/ocaml/xapi/xapi_vif_helpers.ml index adafc6d888f..b7fd5eadd2d 100644 --- a/ocaml/xapi/xapi_vif_helpers.ml +++ b/ocaml/xapi/xapi_vif_helpers.ml @@ -54,13 +54,13 @@ let valid_operations ~__context record _ref' : table = debug "No operations are valid because current-operations = [ %s ]" (String.concat "; " (List.map - (fun (task, op) -> task ^ " -> " ^ vif_operation_to_string op) + (fun (task, op) -> task ^ " -> " ^ vif_operations_to_string op) current_ops ) ) ; let concurrent_op = snd (List.hd current_ops) in set_errors Api_errors.other_operation_in_progress - ["VIF"; _ref; vif_operation_to_string concurrent_op] + ["VIF"; _ref; vif_operations_to_string concurrent_op] all_ops ) ; (* No hotplug on dom0 *) @@ -163,7 +163,7 @@ let throw_error (table : table) op = , [ Printf.sprintf "xapi_vif_helpers.assert_operation_valid unknown operation: %s" - (vif_operation_to_string op) + (vif_operations_to_string op) ] ) ) From 2859b38e0e9dd27885502e86543056dd6d283f7f Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 13:28:15 +0100 Subject: [PATCH 246/341] IH-689: record_util.ml - move to autogenerated repo_origin Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 3 +-- ocaml/xapi-cli-server/record_util.ml | 2 -- ocaml/xapi-cli-server/records.ml | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 8167fad07a1..6412268d1fe 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -280,8 +280,7 @@ let tests = (Some (O.vm_placement_policy_of_string, N.vm_placement_policy_of_string)) all_placement_policy (O.vm_placement_policy_to_string, N.vm_placement_policy_to_string) - ; mk __LINE__ None all_origin - (O.repo_origin_to_string, N.repo_origin_to_string) + ; mk __LINE__ None all_origin (O.repo_origin_to_string, N.origin_to_string) ] |> List.concat diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 9b082a2dbc2..fb249f242ca 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -981,5 +981,3 @@ let vm_placement_policy_of_string a = `anti_affinity | s -> record_failure "Invalid VM placement policy, got %s" s - -let repo_origin_to_string = function `remote -> "remote" | `bundle -> "bundle" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 632678b271f..7efa656f819 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -5245,7 +5245,7 @@ let repository_record rpc session_id repository = () ; make_field ~name:"origin" ~get:(fun () -> - Record_util.repo_origin_to_string (x ()).API.repository_origin + Record_util.origin_to_string (x ()).API.repository_origin ) () ] From ac679eef20289064f857d44ebf972fa39d8dfa1e Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 13:29:55 +0100 Subject: [PATCH 247/341] IH-689: record_util.ml - move to autogenerated vtpm_operations Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 2 +- ocaml/xapi-cli-server/record_util.ml | 3 --- ocaml/xapi-cli-server/records.ml | 4 ++-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 6412268d1fe..365fad71f54 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -269,7 +269,7 @@ let tests = all_domain_type (O.domain_type_to_string, N.domain_type_to_string) ; mk __LINE__ None all_vtpm_operations - (O.vtpm_operation_to_string, N.vtpm_operation_to_string) + (O.vtpm_operation_to_string, N.vtpm_operations_to_string) ; mk __LINE__ (Some (O.update_sync_frequency_of_string, N.update_sync_frequency_of_string) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index fb249f242ca..bf55c892345 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -908,9 +908,6 @@ let domain_type_of_string x = | s -> record_failure "Invalid domain type. 
Got %s" s -let vtpm_operation_to_string (op : API.vtpm_operations) = - match op with `destroy -> "destroy" - (** parse [0-9]*(b|bytes|kib|mib|gib|tib)* to bytes *) let bytes_of_string str = let ( ** ) a b = Int64.mul a b in diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 7efa656f819..8a5b975b176 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -5295,11 +5295,11 @@ let vtpm_record rpc session_id vtpm = () ; make_field ~name:"allowed-operations" ~get:(fun () -> - map_and_concat Record_util.vtpm_operation_to_string + map_and_concat Record_util.vtpm_operations_to_string (x ()).API.vTPM_allowed_operations ) ~get_set:(fun () -> - List.map Record_util.vtpm_operation_to_string + List.map Record_util.vtpm_operations_to_string (x ()).API.vTPM_allowed_operations ) () From 1e1aa6ae24b9f6d72e2c01c073b9d3fe3aa9eb50 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 10:16:55 +0100 Subject: [PATCH 248/341] IH-689: record_util.ml - remove manually-specified vbd_mode and vbd_type vbd_mode was not testing of_string variants because they were not named according to the convention and were not picked up by the test-generating script. vbd_type was not being tested because it did not have a to_string function, only an of_string one. An automatically generated to_string function is used in the test instead. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 9 ++++++++- ocaml/xapi-cli-server/record_util.ml | 21 +-------------------- ocaml/xapi-cli-server/records.ml | 4 ++-- 3 files changed, 11 insertions(+), 23 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 365fad71f54..048be9da9bb 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -190,7 +190,14 @@ let tests = ) ; mk __LINE__ None all_pci_dom0_access (O.pci_dom0_access_to_string, N.pci_dom0_access_to_string) - ; mk __LINE__ None all_vbd_mode (O.vbd_mode_to_string, N.vbd_mode_to_string) + ; mk __LINE__ + (Some (O.string_to_vbd_mode, N.vbd_mode_of_string)) + all_vbd_mode + (O.vbd_mode_to_string, N.vbd_mode_to_string) + ; mk __LINE__ + (Some (O.string_to_vbd_type, N.vbd_type_of_string)) + all_vbd_type + (N.vbd_type_to_string, N.vbd_type_to_string) (*; mk __LINE__ None all_power (O.power_to_string, N.power_to_string)*) ; mk __LINE__ None all_vdi_type (O.vdi_type_to_string, N.vdi_type_to_string) ; mk __LINE__ diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index bf55c892345..fb8bac8d1ec 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -652,28 +652,9 @@ let string_to_vdi_onboot s = | _ -> record_failure "Expected 'persist' or 'reset', got %s" s -let string_to_vbd_mode s = - match String.lowercase_ascii s with - | "ro" -> - `RO - | "rw" -> - `RW - | _ -> - record_failure "Expected 'RO' or 'RW', got %s" s - +(* Intentional shadowing - inconsistent capitalization *) let vbd_mode_to_string = function `RO -> "ro" | `RW -> "rw" -let string_to_vbd_type s = - match String.lowercase_ascii s with - | "cd" -> - `CD - | "disk" -> - `Disk - | "floppy" -> - `Floppy - | _ -> - record_failure "Expected 'CD' or 'Disk', got %s" s - (* Some usage sites rely on the output of the conversion 
function to be lowercase*) let vm_power_state_to_lowercase_string h = diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 8a5b975b176..d39d9bf84e4 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -3489,7 +3489,7 @@ let vbd_record rpc session_id vbd = ) ~set:(fun mode -> Client.VBD.set_mode ~rpc ~session_id ~self:vbd - ~value:(Record_util.string_to_vbd_mode mode) + ~value:(Record_util.vbd_mode_of_string mode) ) () ; make_field ~name:"type" @@ -3504,7 +3504,7 @@ let vbd_record rpc session_id vbd = ) ~set:(fun ty -> Client.VBD.set_type ~rpc ~session_id ~self:vbd - ~value:(Record_util.string_to_vbd_type ty) + ~value:(Record_util.vbd_type_of_string ty) ) () ; make_field ~name:"unpluggable" From 7770369843a9a2004f7e44fa94f71237b3849da6 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 22 Aug 2024 15:55:51 +0100 Subject: [PATCH 249/341] IH-689: record_util.ml - move to autogenerated cls conversion functions Expands tests since these did not consider the 'string_to_X' form of conversion functions. Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 5 ++- ocaml/xapi-cli-server/record_util.ml | 46 --------------------- ocaml/xapi-cli-server/records.ml | 2 +- ocaml/xapi/xapi_message.ml | 14 +++---- 4 files changed, 12 insertions(+), 55 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 048be9da9bb..1c9815523fb 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -112,7 +112,10 @@ let tests = [ mk __LINE__ None all_certificate_type (O.certificate_type_to_string, N.certificate_type_to_string) - ; mk __LINE__ None all_cls (O.class_to_string, N.class_to_string) + ; mk __LINE__ + (Some (O.string_to_class, N.cls_of_string)) + all_cls + (O.class_to_string, N.cls_to_string) ; mk __LINE__ None all_vm_power_state (O.power_state_to_string, N.vm_power_state_to_string) ; mk __LINE__ None all_vm_power_state diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index fb8bac8d1ec..70a9e0912ab 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -36,52 +36,6 @@ let certificate_type_to_string = function | `ca -> "ca" -let class_to_string cls = - match cls with - | `VM -> - "VM" - | `Host -> - "Host" - | `SR -> - "SR" - | `Pool -> - "Pool" - | `VMPP -> - "VMPP" - | `VMSS -> - "VMSS" - | `PVS_proxy -> - "PVS_proxy" - | `VDI -> - "VDI" - | `Certificate -> - "Certificate" - | _ -> - "unknown" - -let string_to_class str = - match str with - | "VM" -> - `VM - | "Host" -> - `Host - | "SR" -> - `SR - | "Pool" -> - `Pool - | "VMPP" -> - `VMPP - | "VMSS" -> - `VMSS - | "PVS_proxy" -> - `PVS_proxy - | "VDI" -> - `VDI - | "Certificate" -> - `Certificate - | _ -> - failwith "Bad type" - let vm_operation_table = [ (`assert_operation_valid, "assertoperationvalid") diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 
d39d9bf84e4..22c01f8e63e 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -370,7 +370,7 @@ let message_record rpc session_id message = ~get:(fun () -> Int64.to_string (x ()).API.message_priority) () ; make_field ~name:"class" - ~get:(fun () -> Record_util.class_to_string (x ()).API.message_cls) + ~get:(fun () -> Record_util.cls_to_string (x ()).API.message_cls) () ; make_field ~name:"obj-uuid" ~get:(fun () -> (x ()).API.message_obj_uuid) diff --git a/ocaml/xapi/xapi_message.ml b/ocaml/xapi/xapi_message.ml index 50621a9aa9c..8d81bef3777 100644 --- a/ocaml/xapi/xapi_message.ml +++ b/ocaml/xapi/xapi_message.ml @@ -71,7 +71,7 @@ let to_xml output _ref gen message = tag "ref" [data (Ref.string_of _ref)] ; tag "name" [data message.API.message_name] ; tag "priority" [data (Int64.to_string message.API.message_priority)] - ; tag "cls" [data (Record_util.class_to_string message.API.message_cls)] + ; tag "cls" [data (Record_util.cls_to_string message.API.message_cls)] ; tag "obj_uuid" [data message.API.message_obj_uuid] ; tag "timestamp" [data (Date.to_string message.API.message_timestamp)] ; tag "uuid" [data message.API.message_uuid] @@ -119,7 +119,7 @@ let of_xml input = message := {!message with API.message_priority= Int64.of_string dat} | "cls" -> message := - {!message with API.message_cls= Record_util.string_to_class dat} + {!message with API.message_cls= Record_util.cls_of_string dat} | "obj_uuid" -> message := {!message with API.message_obj_uuid= dat} | "timestamp" -> @@ -188,7 +188,7 @@ let import_xml xml_in = (********** Symlink functions *************) let class_symlink cls obj_uuid = - let strcls = Record_util.class_to_string cls in + let strcls = Record_util.cls_to_string cls in Printf.sprintf "%s/%s/%s" message_dir strcls obj_uuid let uuid_symlink () = Printf.sprintf "%s/uuids" message_dir @@ -411,14 +411,14 @@ let write ~__context ~_ref ~message = if write failed, or message ref otherwise. 
*) let create ~__context ~name ~priority ~cls ~obj_uuid ~body = debug "Message.create %s %Ld %s %s" name priority - (Record_util.class_to_string cls) + (Record_util.cls_to_string cls) obj_uuid ; if not (Encodings.UTF8_XML.is_valid body) then raise (Api_errors.Server_error (Api_errors.invalid_value, ["UTF8 expected"])) ; if not (check_uuid ~__context ~cls ~uuid:obj_uuid) then raise (Api_errors.Server_error - (Api_errors.uuid_invalid, [Record_util.class_to_string cls; obj_uuid]) + (Api_errors.uuid_invalid, [Record_util.cls_to_string cls; obj_uuid]) ) ; let _ref = Ref.make () in let uuid = Uuidx.to_string (Uuidx.make ()) in @@ -800,7 +800,7 @@ let handler (req : Http.Request.t) fd _ = else (* Get and check query parameters *) let uuid = List.assoc "uuid" query and cls = List.assoc "cls" query in let cls = - try Record_util.string_to_class cls + try Record_util.cls_of_string cls with _ -> failwith ("Xapi_message.handler: Bad class " ^ cls) in if not (check_uuid ~__context ~cls ~uuid) then @@ -829,7 +829,7 @@ let send_messages ~__context ~cls ~obj_uuid ~session_id ~remote_address let query = [ ("session_id", Ref.string_of session_id) - ; ("cls", Record_util.class_to_string cls) + ; ("cls", Record_util.cls_to_string cls) ; ("uuid", obj_uuid) ] in From bc1504d56e086351357bac7565bf7a18d1254525 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 09:06:56 +0100 Subject: [PATCH 250/341] IH-689: record_util.ml - move to autogenerated vmss_{type,frequency} Expands tests since these did not consider the 'string_to_X' form of conversion functions. Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 9 +++-- ocaml/xapi-cli-server/cli_operations.ml | 4 +-- ocaml/xapi-cli-server/record_util.ml | 37 --------------------- ocaml/xapi-cli-server/records.ml | 4 +-- 4 files changed, 11 insertions(+), 43 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 1c9815523fb..c4459f2c4ec 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -148,8 +148,13 @@ let tests = (O.vif_operation_to_string, N.vif_operations_to_string) ; mk __LINE__ None all_vif_locking_mode (O.vif_locking_mode_to_string, N.vif_locking_mode_to_string) - ; mk __LINE__ None all_vmss_type (O.vmss_type_to_string, N.vmss_type_to_string) - ; mk __LINE__ None all_vmss_frequency + ; mk __LINE__ + (Some (O.string_to_vmss_type, N.vmss_type_of_string)) + all_vmss_type + (O.vmss_type_to_string, N.vmss_type_to_string) + ; mk __LINE__ + (Some (O.string_to_vmss_frequency, N.vmss_frequency_of_string)) + all_vmss_frequency (O.vmss_frequency_to_string, N.vmss_frequency_to_string) ; mk __LINE__ None all_network_default_locking_mode ( O.network_default_locking_mode_to_string diff --git a/ocaml/xapi-cli-server/cli_operations.ml b/ocaml/xapi-cli-server/cli_operations.ml index f715f70da7c..068309abaca 100644 --- a/ocaml/xapi-cli-server/cli_operations.ml +++ b/ocaml/xapi-cli-server/cli_operations.ml @@ -7370,8 +7370,8 @@ let vmss_create printer rpc session_id params = failwith ("No default value for parameter " ^ param_name) in let name_label = List.assoc "name-label" params in - let ty = Record_util.string_to_vmss_type (get "type") in - let frequency = Record_util.string_to_vmss_frequency (get "frequency") in + let ty = Record_util.vmss_type_of_string (get "type") in + let frequency = Record_util.vmss_frequency_of_string (get "frequency") in let schedule = read_map_params "schedule" params in (* optional parameters with 
default values *) let name_description = get "name-description" ~default:"" in diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 70a9e0912ab..919ba894002 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -228,43 +228,6 @@ let string_to_vif_locking_mode = function record_failure "Expected 'network_default', 'locked', 'unlocked', 'disabled', got %s" s -let vmss_type_to_string = function - | `snapshot -> - "snapshot" - | `checkpoint -> - "checkpoint" - | `snapshot_with_quiesce -> - "snapshot_with_quiesce" - -let string_to_vmss_type = function - | "snapshot" -> - `snapshot - | "checkpoint" -> - `checkpoint - | "snapshot_with_quiesce" -> - `snapshot_with_quiesce - | s -> - record_failure - "Expected 'snapshot', 'checkpoint', 'snapshot_with_quiesce', got %s" s - -let vmss_frequency_to_string = function - | `hourly -> - "hourly" - | `daily -> - "daily" - | `weekly -> - "weekly" - -let string_to_vmss_frequency = function - | "hourly" -> - `hourly - | "daily" -> - `daily - | "weekly" -> - `weekly - | s -> - record_failure "Expected 'hourly', 'daily', 'weekly', got %s" s - let network_default_locking_mode_to_string = function | `unlocked -> "unlocked" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 22c01f8e63e..4b133ef2570 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -1518,7 +1518,7 @@ let vmss_record rpc session_id vmss = ~get:(fun () -> Record_util.vmss_type_to_string (x ()).API.vMSS_type) ~set:(fun x -> Client.VMSS.set_type ~rpc ~session_id ~self:vmss - ~value:(Record_util.string_to_vmss_type x) + ~value:(Record_util.vmss_type_of_string x) ) () ; make_field ~name:"retained-snapshots" @@ -1536,7 +1536,7 @@ let vmss_record rpc session_id vmss = ) ~set:(fun x -> Client.VMSS.set_frequency ~rpc ~session_id ~self:vmss - ~value:(Record_util.string_to_vmss_frequency x) + 
~value:(Record_util.vmss_frequency_of_string x) ) () ; make_field ~name:"schedule" From 57683a78ed11a5cabc9aa895a4ab489628c161be Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 09:13:36 +0100 Subject: [PATCH 251/341] IH-689: record_util.ml - move to autogenerated vif_locking_mode Expands tests since these did not consider the 'string_to_X' form of conversion functions. Renames usage sites where needed. Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 4 +++- ocaml/xapi-cli-server/record_util.ml | 23 --------------------- ocaml/xapi-cli-server/records.ml | 2 +- 3 files changed, 4 insertions(+), 25 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index c4459f2c4ec..d4b48bd6104 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -146,7 +146,9 @@ let tests = (O.vbd_operation_to_string, N.vbd_operations_to_string) ; mk __LINE__ None all_vif_operations (O.vif_operation_to_string, N.vif_operations_to_string) - ; mk __LINE__ None all_vif_locking_mode + ; mk __LINE__ + (Some (O.string_to_vif_locking_mode, N.vif_locking_mode_of_string)) + all_vif_locking_mode (O.vif_locking_mode_to_string, N.vif_locking_mode_to_string) ; mk __LINE__ (Some (O.string_to_vmss_type, N.vmss_type_of_string)) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 919ba894002..4f68b687ac6 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -205,29 +205,6 @@ let sr_operation_to_string : API.storage_operations -> string = function | `pbd_destroy -> "PBD.destroy" -let vif_locking_mode_to_string = function - | `network_default -> - "network_default" - | `locked -> - "locked" - | `unlocked -> - "unlocked" - | `disabled -> - "disabled" - -let string_to_vif_locking_mode = function - | "network_default" -> - `network_default - | "locked" -> - `locked 
- | "unlocked" -> - `unlocked - | "disabled" -> - `disabled - | s -> - record_failure - "Expected 'network_default', 'locked', 'unlocked', 'disabled', got %s" s - let network_default_locking_mode_to_string = function | `unlocked -> "unlocked" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 4b133ef2570..c2d80f0eb25 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -920,7 +920,7 @@ let vif_record rpc session_id vif = ) ~set:(fun value -> Client.VIF.set_locking_mode ~rpc ~session_id ~self:vif - ~value:(Record_util.string_to_vif_locking_mode value) + ~value:(Record_util.vif_locking_mode_of_string value) ) () ; make_field ~name:"ipv4-allowed" From a229d8bc6fad9d70e1b2603914072b0e3e695c19 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 09:15:17 +0100 Subject: [PATCH 252/341] IH-689: record_util.ml - remove manually-specified network_{purpose,default_locking_mode} Expands tests, since these did not consider the 'string_to_X' form of conversion functions. Renames usage sites where needed. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 12 +++++++-- ocaml/xapi-cli-server/record_util.ml | 28 --------------------- ocaml/xapi-cli-server/records.ml | 6 ++--- 3 files changed, 13 insertions(+), 33 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index d4b48bd6104..4c60fe569a2 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -158,11 +158,19 @@ let tests = (Some (O.string_to_vmss_frequency, N.vmss_frequency_of_string)) all_vmss_frequency (O.vmss_frequency_to_string, N.vmss_frequency_to_string) - ; mk __LINE__ None all_network_default_locking_mode + ; mk __LINE__ + (Some + ( O.string_to_network_default_locking_mode + , N.network_default_locking_mode_of_string + ) + ) + all_network_default_locking_mode ( O.network_default_locking_mode_to_string , N.network_default_locking_mode_to_string ) - ; mk __LINE__ None all_network_purpose + ; mk __LINE__ + (Some (O.string_to_network_purpose, N.network_purpose_of_string)) + all_network_purpose (O.network_purpose_to_string, N.network_purpose_to_string) ; mk __LINE__ None all_vm_appliance_operation (O.vm_appliance_operation_to_string, N.vm_appliance_operation_to_string) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 4f68b687ac6..5499bdd5425 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -205,34 +205,6 @@ let sr_operation_to_string : API.storage_operations -> string = function | `pbd_destroy -> "PBD.destroy" -let network_default_locking_mode_to_string = function - | `unlocked -> - "unlocked" - | `disabled -> - "disabled" - -let string_to_network_default_locking_mode = function - | "unlocked" -> - `unlocked - | "disabled" -> - `disabled - | s -> - record_failure "Expected 'unlocked' or 'disabled', got %s" s - -let network_purpose_to_string : API.network_purpose -> string 
= function - | `nbd -> - "nbd" - | `insecure_nbd -> - "insecure_nbd" - -let string_to_network_purpose : string -> API.network_purpose = function - | "nbd" -> - `nbd - | "insecure_nbd" -> - `insecure_nbd - | s -> - record_failure "Expected a network purpose string; got %s" s - let vm_appliance_operation_to_string = function | `start -> "start" diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index c2d80f0eb25..78b94643865 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -1070,7 +1070,7 @@ let net_record rpc session_id net = ~set:(fun value -> Client.Network.set_default_locking_mode ~rpc ~session_id ~network:net - ~value:(Record_util.string_to_network_default_locking_mode value) + ~value:(Record_util.network_default_locking_mode_of_string value) ) () ; make_field ~name:"purpose" @@ -1084,11 +1084,11 @@ let net_record rpc session_id net = ) ~add_to_set:(fun s -> Client.Network.add_purpose ~rpc ~session_id ~self:net - ~value:(Record_util.string_to_network_purpose s) + ~value:(Record_util.network_purpose_of_string s) ) ~remove_from_set:(fun s -> Client.Network.remove_purpose ~rpc ~session_id ~self:net - ~value:(Record_util.string_to_network_purpose s) + ~value:(Record_util.network_purpose_of_string s) ) () ] From f2f359a832901c40bde28dcf3f7590289631f12d Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 09:22:45 +0100 Subject: [PATCH 253/341] IH-689: record_util.ml - remove on_{softreboot,normal_exit,crash,vdi_onboot} Removes manually-specified versions in favour of identical automatically-generated versions. Justifies shadowing the few that do not have consistent behaviour. Renames 'to_string_X' at usage sites where needed. Expands tests, since these did not consider inconsistently named 'string_to_X' conversion functions. 
Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 23 +++++++-- ocaml/xapi-cli-server/record_util.ml | 53 ++------------------- ocaml/xapi-cli-server/records.ml | 10 ++-- 3 files changed, 26 insertions(+), 60 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index 4c60fe569a2..d89fc9e8b05 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -184,12 +184,22 @@ let tests = ; mk __LINE__ None all_task_allowed_operations (O.task_allowed_operations_to_string, N.task_allowed_operations_to_string) (*; mk __LINE__ None all_alert_level (O.alert_level_to_string, N.alert_level_to_string)*) - ; mk __LINE__ None all_on_normal_exit + ; mk __LINE__ + (Some (O.string_to_on_normal_exit, N.on_normal_exit_of_string)) + all_on_normal_exit (O.on_normal_exit_to_string, N.on_normal_exit_to_string) - ; mk __LINE__ None all_on_crash_behaviour + ; mk __LINE__ + (Some (O.string_to_on_crash_behaviour, N.on_crash_behaviour_of_string)) + all_on_crash_behaviour (O.on_crash_behaviour_to_string, N.on_crash_behaviour_to_string) - ; mk __LINE__ None all_on_softreboot_behavior - (O.on_softreboot_behaviour_to_string, N.on_softreboot_behaviour_to_string) + ; mk __LINE__ + (Some + ( O.string_to_on_softreboot_behaviour + , N.on_softreboot_behavior_of_string + ) + ) + all_on_softreboot_behavior + (N.on_softreboot_behaviour_to_string, N.on_softreboot_behaviour_to_string) ; mk __LINE__ None all_host_display (O.host_display_to_string, N.host_display_to_string) ; mk __LINE__ @@ -286,7 +296,10 @@ let tests = ( O.network_sriov_configuration_mode_to_string , N.sriov_configuration_mode_to_string ) - ; mk __LINE__ None all_on_boot (O.on_boot_to_string, N.on_boot_to_string) + ; mk __LINE__ + (Some (O.string_to_vdi_onboot, N.on_boot_of_string)) + all_on_boot + (O.on_boot_to_string, N.on_boot_to_string) ; mk __LINE__ None all_tristate_type (O.tristate_to_string, 
N.tristate_to_string) ; mk __LINE__ diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 5499bdd5425..4979973ae17 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -381,18 +381,11 @@ let task_allowed_operations_to_string s = let alert_level_to_string s = match s with `Info -> "info" | `Warn -> "warning" | `Error -> "error" +(* Intentional shadowing - inconsistent capitalization *) let on_normal_exit_to_string x = match x with `destroy -> "Destroy" | `restart -> "Restart" -let string_to_on_normal_exit s = - match String.lowercase_ascii s with - | "destroy" -> - `destroy - | "restart" -> - `restart - | _ -> - record_failure "Expected 'destroy' or 'restart', got %s" s - +(* Intentional shadowing - inconsistent capitalization *) let on_crash_behaviour_to_string x = match x with | `destroy -> @@ -408,27 +401,7 @@ let on_crash_behaviour_to_string x = | `rename_restart -> "Rename restart" -let string_to_on_crash_behaviour s = - match String.lowercase_ascii s with - | "destroy" -> - `destroy - | "coredump_and_destroy" -> - `coredump_and_destroy - | "restart" -> - `restart - | "coredump_and_restart" -> - `coredump_and_restart - | "preserve" -> - `preserve - | "rename_restart" -> - `rename_restart - | _ -> - record_failure - "Expected 'destroy', 'coredump_and_destroy', \ - 'restart','coredump_and_restart', 'preserve' or 'rename_restart', got \ - %s" - s - +(* Intentional shadowing - inconsistent capitalization *) let on_softreboot_behaviour_to_string x = match x with | `destroy -> @@ -440,23 +413,6 @@ let on_softreboot_behaviour_to_string x = | `soft_reboot -> "Soft reboot" -let string_to_on_softreboot_behaviour s = - match String.lowercase_ascii s with - | "destroy" -> - `destroy - | "restart" -> - `restart - | "preserve" -> - `preserve - | "soft_reboot" -> - `soft_reboot - | _ -> - record_failure - "Expected 'destroy', 'coredump_and_destroy', 'restart', \ - 'coredump_and_restart', 
'preserve', 'soft_reboot' or \ - 'rename_restart', got %s" - s - let host_display_to_string h = match h with | `enabled -> @@ -718,9 +674,6 @@ let pif_igmp_status_to_string = function | `unknown -> "unknown" -let on_boot_to_string onboot = - match onboot with `reset -> "reset" | `persist -> "persist" - let tristate_to_string tristate = match tristate with | `yes -> diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 78b94643865..47e4b15ca9f 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -1962,7 +1962,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_shutdown ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_normal_exit x) + ~value:(Record_util.on_normal_exit_of_string x) ) () ; make_field ~name:"actions-after-softreboot" @@ -1972,7 +1972,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_softreboot ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_softreboot_behaviour x) + ~value:(Record_util.on_softreboot_behavior_of_string x) ) () ; make_field ~name:"actions-after-reboot" @@ -1982,7 +1982,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_reboot ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_normal_exit x) + ~value:(Record_util.on_normal_exit_of_string x) ) () ; make_field ~name:"actions-after-crash" @@ -1992,7 +1992,7 @@ let vm_record rpc session_id vm = ) ~set:(fun x -> Client.VM.set_actions_after_crash ~rpc ~session_id ~self:vm - ~value:(Record_util.string_to_on_crash_behaviour x) + ~value:(Record_util.on_crash_behaviour_of_string x) ) () ; make_field ~name:"console-uuids" @@ -3359,7 +3359,7 @@ let vdi_record rpc session_id vdi = ~get:(fun () -> Record_util.on_boot_to_string (x ()).API.vDI_on_boot) ~set:(fun onboot -> Client.VDI.set_on_boot ~rpc ~session_id ~self:vdi - ~value:(Record_util.string_to_vdi_onboot onboot) + 
~value:(Record_util.on_boot_of_string onboot) ) () ; make_field ~name:"allow-caching" From 1e851c87d0ff3e9a0459aa525e3e2b7d9fb36abd Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 13:53:12 +0100 Subject: [PATCH 254/341] IH-689: record_util.ml - move to autogenerated vm_uefi_mode Its conversion function was not being tested because it did not have a to_string function, only an of_string one. An automatically generated to_string function is used in the test instead. Signed-off-by: Andrii Sultanov --- ocaml/tests/record_util/test_record_util.ml | 4 ++++ ocaml/xapi-cli-server/record_util.ml | 8 -------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/ocaml/tests/record_util/test_record_util.ml b/ocaml/tests/record_util/test_record_util.ml index d89fc9e8b05..c3c54f326e8 100644 --- a/ocaml/tests/record_util/test_record_util.ml +++ b/ocaml/tests/record_util/test_record_util.ml @@ -122,6 +122,10 @@ let tests = (O.power_to_string, N.vm_power_state_to_lowercase_string) ; mk __LINE__ None all_vm_operations (O.vm_operation_to_string, N.vm_operation_to_string) + ; mk __LINE__ + (Some (O.vm_uefi_mode_of_string, N.vm_uefi_mode_of_string)) + all_vm_uefi_mode + (N.vm_uefi_mode_to_string, N.vm_uefi_mode_to_string) ; mk __LINE__ None all_vm_secureboot_readiness (O.vm_secureboot_readiness_to_string, N.vm_secureboot_readiness_to_string) ; mk __LINE__ None all_pool_guest_secureboot_readiness diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 4979973ae17..63e27d2eb15 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -101,14 +101,6 @@ let string_to_vm_operation x = else List.assoc x table -let vm_uefi_mode_of_string = function - | "setup" -> - `setup - | "user" -> - `user - | s -> - record_failure "Expected 'user','setup', got %s" s - let vm_secureboot_readiness_to_string = function | `not_supported -> "not_supported" From d5f72c3da074b4125ccd1d2d84efe79933efde24 
Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 13:53:39 +0100 Subject: [PATCH 255/341] IH-689: Remove trivial manually-specified record_util conversion functions All of these are already covered by tests, and do not require any renaming on the usage sites. Signed-off-by: Andrii Sultanov --- ocaml/xapi-cli-server/record_util.ml | 242 --------------------------- 1 file changed, 242 deletions(-) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index 63e27d2eb15..bb5120bb95b 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -28,14 +28,6 @@ include Generated_record_utils let to_str = function Rpc.String x -> x | _ -> failwith "Invalid" -let certificate_type_to_string = function - | `host -> - "host" - | `host_internal -> - "host_internal" - | `ca -> - "ca" - let vm_operation_table = [ (`assert_operation_valid, "assertoperationvalid") @@ -101,30 +93,6 @@ let string_to_vm_operation x = else List.assoc x table -let vm_secureboot_readiness_to_string = function - | `not_supported -> - "not_supported" - | `disabled -> - "disabled" - | `first_boot -> - "first_boot" - | `ready -> - "ready" - | `ready_no_dbx -> - "ready_no_dbx" - | `setup_mode -> - "setup_mode" - | `certs_incomplete -> - "certs_incomplete" - -let pool_guest_secureboot_readiness_to_string = function - | `ready -> - "ready" - | `ready_no_dbx -> - "ready_no_dbx" - | `not_ready -> - "not_ready" - let host_operation_to_string = function | `provision -> "provision" @@ -147,14 +115,6 @@ let host_operation_to_string = function | `enable -> "enable" -let latest_synced_updates_applied_state_to_string = function - | `yes -> - "yes" - | `no -> - "no" - | `unknown -> - "unknown" - let sr_operation_to_string : API.storage_operations -> string = function | `scan -> "scan" @@ -197,16 +157,6 @@ let sr_operation_to_string : API.storage_operations -> string = function | `pbd_destroy -> "PBD.destroy" -let 
vm_appliance_operation_to_string = function - | `start -> - "start" - | `clean_shutdown -> - "clean_shutdown" - | `hard_shutdown -> - "hard_shutdown" - | `shutdown -> - "shutdown" - let cpu_feature_to_string f = match f with | `FPU -> @@ -338,19 +288,6 @@ let cpu_feature_to_string f = | `VMX -> "VMX" -let task_status_type_to_string s = - match s with - | `pending -> - "pending" - | `success -> - "success" - | `failure -> - "failure" - | `cancelling -> - "cancelling" - | `cancelled -> - "cancelled" - let protocol_to_string = function | `vt100 -> "VT100" @@ -359,14 +296,6 @@ let protocol_to_string = function | `rdp -> "RDP" -let telemetry_frequency_to_string = function - | `daily -> - "daily" - | `weekly -> - "weekly" - | `monthly -> - "monthly" - let task_allowed_operations_to_string s = match s with `cancel -> "Cancel" | `destroy -> "Destroy" @@ -405,56 +334,6 @@ let on_softreboot_behaviour_to_string x = | `soft_reboot -> "Soft reboot" -let host_display_to_string h = - match h with - | `enabled -> - "enabled" - | `enable_on_reboot -> - "enable_on_reboot" - | `disabled -> - "disabled" - | `disable_on_reboot -> - "disable_on_reboot" - -let host_sched_gran_of_string s = - match String.lowercase_ascii s with - | "core" -> - `core - | "cpu" -> - `cpu - | "socket" -> - `socket - | _ -> - record_failure "Expected 'core','cpu', 'socket', got %s" s - -let host_sched_gran_to_string = function - | `core -> - "core" - | `cpu -> - "cpu" - | `socket -> - "socket" - -let host_numa_affinity_policy_to_string = function - | `any -> - "any" - | `best_effort -> - "best_effort" - | `default_policy -> - "default_policy" - -let host_numa_affinity_policy_of_string a = - match String.lowercase_ascii a with - | "any" -> - `any - | "best_effort" -> - `best_effort - | "default_policy" -> - `default_policy - | s -> - record_failure "Expected 'any', 'best_effort' or 'default_policy', got %s" - s - let pci_dom0_access_to_string x = host_display_to_string x let string_to_vdi_onboot s = @@ -499,93 
+378,6 @@ let vdi_type_to_string t = | `cbt_metadata -> "CBT metadata" -let ip_configuration_mode_to_string = function - | `None -> - "None" - | `DHCP -> - "DHCP" - | `Static -> - "Static" - -let ip_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "dhcp" -> - `DHCP - | "none" -> - `None - | "static" -> - `Static - | s -> - record_failure "Expected 'dhcp','none' or 'static', got %s" s - -let vif_ipv4_configuration_mode_to_string = function - | `None -> - "None" - | `Static -> - "Static" - -let vif_ipv4_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "none" -> - `None - | "static" -> - `Static - | s -> - record_failure "Expected 'none' or 'static', got %s" s - -let ipv6_configuration_mode_to_string = function - | `None -> - "None" - | `DHCP -> - "DHCP" - | `Static -> - "Static" - | `Autoconf -> - "Autoconf" - -let ipv6_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "dhcp" -> - `DHCP - | "none" -> - `None - | "static" -> - `Static - | "autoconf" -> - `Autoconf - | s -> - record_failure "Expected 'dhcp','none' 'autoconf' or 'static', got %s" s - -let vif_ipv6_configuration_mode_to_string = function - | `None -> - "None" - | `Static -> - "Static" - -let vif_ipv6_configuration_mode_of_string m = - match String.lowercase_ascii m with - | "none" -> - `None - | "static" -> - `Static - | s -> - record_failure "Expected 'none' or 'static', got %s" s - -let primary_address_type_to_string = function - | `IPv4 -> - "IPv4" - | `IPv6 -> - "IPv6" - -let primary_address_type_of_string m = - match String.lowercase_ascii m with - | "ipv4" -> - `IPv4 - | "ipv6" -> - `IPv6 - | s -> - record_failure "Expected 'ipv4' or 'ipv6', got %s" s - let bond_mode_to_string = function | `balanceslb -> "balance-slb" @@ -647,25 +439,6 @@ let bool_of_string s = record_failure "Expected 'true','t','yes','y','1','false','f','no','n','0' got %s" s -let tunnel_protocol_of_string s = - match String.lowercase_ascii s with - | 
"gre" -> - `gre - | "vxlan" -> - `vxlan - | _ -> - record_failure "Expected 'gre','vxlan', got %s" s - -let tunnel_protocol_to_string = function `gre -> "gre" | `vxlan -> "vxlan" - -let pif_igmp_status_to_string = function - | `enabled -> - "enabled" - | `disabled -> - "disabled" - | `unknown -> - "unknown" - let tristate_to_string tristate = match tristate with | `yes -> @@ -741,21 +514,6 @@ let mac_from_int_array macs = (* generate a random mac that is locally administered *) let random_mac_local () = mac_from_int_array (Array.make 6 (Random.int 0x100)) -let update_sync_frequency_to_string = function - | `daily -> - "daily" - | `weekly -> - "weekly" - -let update_sync_frequency_of_string s = - match String.lowercase_ascii s with - | "daily" -> - `daily - | "weekly" -> - `weekly - | _ -> - record_failure "Expected 'daily', 'weekly', got %s" s - let vm_placement_policy_to_string = function | `normal -> "normal" From aa4c447895ebaad34f72164c70836013b13abe7f Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 23 Aug 2024 10:02:53 +0100 Subject: [PATCH 256/341] IH-689: Justify intentional shadowing in record_util.ml Signed-off-by: Andrii Sultanov --- ocaml/xapi-cli-server/record_util.ml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml index bb5120bb95b..a7a4dd2ec72 100644 --- a/ocaml/xapi-cli-server/record_util.ml +++ b/ocaml/xapi-cli-server/record_util.ml @@ -77,12 +77,17 @@ let vm_operation_table = ; (`create_vtpm, "create_vtpm") ] +(* Intentional shadowing - data_souces_op, assertoperationinvalid, + changing_vcpus, changing_memory_limits, query_services, create_template + are inconsistent *) let vm_operation_to_string x = if not (List.mem_assoc x vm_operation_table) then "(unknown operation)" else List.assoc x vm_operation_table +(* Intentional shadowing - + In addition to the above, also inconsistent exceptions *) let string_to_vm_operation x = let table = 
List.map (fun (a, b) -> (b, a)) vm_operation_table in if not (List.mem_assoc x table) then @@ -93,6 +98,8 @@ let string_to_vm_operation x = else List.assoc x table +(* Intentional shadowing - inconsistent behaviour: + vm_start, vm_resume, vm_migrate *) let host_operation_to_string = function | `provision -> "provision" @@ -115,6 +122,7 @@ let host_operation_to_string = function | `enable -> "enable" +(* Intentional shadowing - inconsistent behaviour around _/. *) let sr_operation_to_string : API.storage_operations -> string = function | `scan -> "scan" @@ -157,6 +165,7 @@ let sr_operation_to_string : API.storage_operations -> string = function | `pbd_destroy -> "PBD.destroy" +(* Is not defined in the datamodel - only defined here *) let cpu_feature_to_string f = match f with | `FPU -> @@ -288,6 +297,7 @@ let cpu_feature_to_string f = | `VMX -> "VMX" +(* Intentional shadowing - inconsistent capitalization *) let protocol_to_string = function | `vt100 -> "VT100" @@ -296,9 +306,11 @@ let protocol_to_string = function | `rdp -> "RDP" +(* Intentional shadowing - inconsistent capitalization *) let task_allowed_operations_to_string s = match s with `cancel -> "Cancel" | `destroy -> "Destroy" +(* Is not defined in the datamodel - only defined here *) let alert_level_to_string s = match s with `Info -> "info" | `Warn -> "warning" | `Error -> "error" @@ -353,6 +365,7 @@ let vbd_mode_to_string = function `RO -> "ro" | `RW -> "rw" let vm_power_state_to_lowercase_string h = vm_power_state_to_string h |> String.uncapitalize_ascii +(* Intentional shadowing - inconsistent capitalization *) let vdi_type_to_string t = match t with | `system -> @@ -378,6 +391,7 @@ let vdi_type_to_string t = | `cbt_metadata -> "CBT metadata" +(* Intentional shadowing - inconsistent underscore/dash *) let bond_mode_to_string = function | `balanceslb -> "balance-slb" @@ -386,6 +400,7 @@ let bond_mode_to_string = function | `lacp -> "lacp" +(* Intentional shadowing - inconsistent underscore/dash, custom 
case *) let bond_mode_of_string m = match String.lowercase_ascii m with | "balance-slb" | "" -> @@ -397,12 +412,14 @@ let bond_mode_of_string m = | s -> record_failure "Invalid bond mode. Got %s" s +(* Intentional shadowing - inconsistent underscore/dash *) let allocation_algorithm_to_string = function | `depth_first -> "depth-first" | `breadth_first -> "breadth-first" +(* Intentional shadowing - inconsistent underscore/dash *) let allocation_algorithm_of_string a = match String.lowercase_ascii a with | "depth-first" -> @@ -412,6 +429,7 @@ let allocation_algorithm_of_string a = | s -> record_failure "Invalid allocation algorithm. Got %s" s +(* Intentional shadowing - inconsistent underscore/dash *) let pvs_proxy_status_to_string = function | `stopped -> "stopped" @@ -439,6 +457,7 @@ let bool_of_string s = record_failure "Expected 'true','t','yes','y','1','false','f','no','n','0' got %s" s +(* Intentional shadowing - inconsistent naming *) let tristate_to_string tristate = match tristate with | `yes -> @@ -448,6 +467,7 @@ let tristate_to_string tristate = | `unspecified -> "unspecified" +(* Intentional shadowing - inconsistent underscore/dash *) let domain_type_to_string = function | `hvm -> "hvm" @@ -460,6 +480,7 @@ let domain_type_to_string = function | `unspecified -> "unspecified" +(* Intentional shadowing - inconsistent underscore/dash *) let domain_type_of_string x = match String.lowercase_ascii x with | "hvm" -> @@ -514,12 +535,14 @@ let mac_from_int_array macs = (* generate a random mac that is locally administered *) let random_mac_local () = mac_from_int_array (Array.make 6 (Random.int 0x100)) +(* Intentional shadowing - inconsistent underscore/dash *) let vm_placement_policy_to_string = function | `normal -> "normal" | `anti_affinity -> "anti-affinity" +(* Intentional shadowing - inconsistent underscore/dash *) let vm_placement_policy_of_string a = match String.lowercase_ascii a with | "normal" -> From cf8ba8b1f05c3490953f824c678528fba9a76bd2 Mon Sep 17 
00:00:00 2001 From: Rob Hoes Date: Thu, 20 Jun 2024 14:23:07 +0000 Subject: [PATCH 257/341] Add feature flag to block starting VMs The flag is added to the `enabled_when_unknown` list, which tells xapi to enable the feature if the flag is absent from `host.license_params` (as opposed to present and `true` or `false`). This is done to ensure that VM start is not inadvertently blocked during the rolling pool update that introduces this change. Signed-off-by: Rob Hoes --- ocaml/tests/test_features.ml | 20 +++++++++++--------- ocaml/xapi-types/features.ml | 4 +++- ocaml/xapi-types/features.mli | 1 + ocaml/xapi/message_forwarding.ml | 3 +++ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/ocaml/tests/test_features.ml b/ocaml/tests/test_features.ml index c2c4f5c25e8..3547556f746 100644 --- a/ocaml/tests/test_features.ml +++ b/ocaml/tests/test_features.ml @@ -29,35 +29,37 @@ module OfAssocList = Generic.MakeStateless (struct let transform = of_assoc_list - (* Xen_motion and AD are enabled unless explicitly disabled. All other features - are disabled unless explitly enabled. *) + (* Some features are enabled unless explicitly disabled (see `enabled_when_unknown` + in features.ml). All other features are disabled unless explitly enabled. 
*) let tests = `QuickAndAutoDocumented [ - ([], [Xen_motion; AD; Updates]) + ([], [Xen_motion; AD; Updates; VM_start]) ; ( [ ("restrict_xen_motion", "true") ; ("restrict_ad", "true") ; ("restrict_updates", "true") + ; ("restrict_vm_start", "true") ] , [] ) - ; ([("restrict_xen_motion", "true")], [AD; Updates]) - ; ([("restrict_xen_motion", "false")], [Xen_motion; AD; Updates]) + ; ([("restrict_xen_motion", "true")], [AD; Updates; VM_start]) + ; ([("restrict_xen_motion", "false")], [Xen_motion; AD; Updates; VM_start]) ; ( [("restrict_xen_motion", "false"); ("restrict_dmc", "false")] - , [DMC; Xen_motion; AD; Updates] + , [DMC; Xen_motion; AD; Updates; VM_start] ) ; ( [ ("restrict_xen_motion", "false") ; ("restrict_ad", "true") ; ("restrict_dmc", "false") ] - , [DMC; Xen_motion; Updates] + , [DMC; Xen_motion; Updates; VM_start] ) ; ( [("enable_xha", "true"); ("restrict_xen_motion", "true")] - , [HA; AD; Updates] + , [HA; AD; Updates; VM_start] ) - ; ([("restrict_updates", "true")], [Xen_motion; AD]) + ; ([("restrict_updates", "true")], [Xen_motion; AD; VM_start]) + ; ([("restrict_vm_start", "true")], [Xen_motion; AD; Updates]) ] end) diff --git a/ocaml/xapi-types/features.ml b/ocaml/xapi-types/features.ml index 6e838f32b83..ed952c86b8d 100644 --- a/ocaml/xapi-types/features.ml +++ b/ocaml/xapi-types/features.ml @@ -65,6 +65,7 @@ type feature = | Internal_repo_access | VTPM | VM_groups + | VM_start [@@deriving rpc] type orientation = Positive | Negative @@ -134,13 +135,14 @@ let keys_of_features = ) ; (VTPM, ("restrict_vtpm", Negative, "VTPM")) ; (VM_groups, ("restrict_vm_groups", Negative, "VM_groups")) + ; (VM_start, ("restrict_vm_start", Negative, "Start")) ] (* A list of features that must be considered "enabled" by `of_assoc_list` if the feature string is missing from the list. These are existing features that have been recently restricted, and which we want to remain enabled during a rolling pool upgrade. 
*) -let enabled_when_unknown = [Xen_motion; AD; Updates] +let enabled_when_unknown = [Xen_motion; AD; Updates; VM_start] let name_of_feature f = rpc_of_feature f |> Rpc.string_of_rpc diff --git a/ocaml/xapi-types/features.mli b/ocaml/xapi-types/features.mli index bcd1ef4ac66..fdad4316d86 100644 --- a/ocaml/xapi-types/features.mli +++ b/ocaml/xapi-types/features.mli @@ -73,6 +73,7 @@ type feature = (** Enable restriction on repository access to pool members only *) | VTPM (** Support VTPM device required by Win11 guests *) | VM_groups (** Enable use of VM groups *) + | VM_start (** Allow starting of VMs (!) *) val feature_of_rpc : Rpc.t -> feature (** Convert RPC into {!feature}s *) diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index ce6e69ef54e..81fdf80116d 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -1854,6 +1854,7 @@ functor let start ~__context ~vm ~start_paused ~force = info "VM.start: VM = '%s'" (vm_uuid ~__context vm) ; + Pool_features.assert_enabled ~__context ~f:Features.VM_start ; Xapi_vm_helpers.assert_no_legacy_hardware ~__context ~vm ; let local_fn = Local.VM.start ~vm ~start_paused ~force in let host = @@ -2914,6 +2915,8 @@ functor info "VM.assert_can_boot_here: VM = '%s'; host = '%s'" (vm_uuid ~__context self) (host_uuid ~__context host) ; + if Db.VM.get_power_state ~__context ~self = `Halted then + Pool_features.assert_enabled ~__context ~f:Features.VM_start ; Local.VM.assert_can_boot_here ~__context ~self ~host let retrieve_wlb_recommendations ~__context ~vm = From 8f4c19358182917d6700879f1be196544c0fdfe9 Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Fri, 12 Jul 2024 13:38:11 +0000 Subject: [PATCH 258/341] Add feature flag to block starting VM appliances Signed-off-by: Rob Hoes --- ocaml/tests/test_features.ml | 25 +++++++++++++++++-------- ocaml/xapi-types/features.ml | 5 ++++- ocaml/xapi-types/features.mli | 1 + ocaml/xapi/message_forwarding.ml | 1 + 4 files changed, 23 
insertions(+), 9 deletions(-) diff --git a/ocaml/tests/test_features.ml b/ocaml/tests/test_features.ml index 3547556f746..53b167cc2f8 100644 --- a/ocaml/tests/test_features.ml +++ b/ocaml/tests/test_features.ml @@ -34,32 +34,41 @@ module OfAssocList = Generic.MakeStateless (struct let tests = `QuickAndAutoDocumented [ - ([], [Xen_motion; AD; Updates; VM_start]) + ([], [Xen_motion; AD; Updates; VM_start; VM_appliance_start]) ; ( [ ("restrict_xen_motion", "true") ; ("restrict_ad", "true") ; ("restrict_updates", "true") ; ("restrict_vm_start", "true") + ; ("restrict_vm_appliance_start", "true") ] , [] ) - ; ([("restrict_xen_motion", "true")], [AD; Updates; VM_start]) - ; ([("restrict_xen_motion", "false")], [Xen_motion; AD; Updates; VM_start]) + ; ( [("restrict_xen_motion", "true")] + , [AD; Updates; VM_start; VM_appliance_start] + ) + ; ( [("restrict_xen_motion", "false")] + , [Xen_motion; AD; Updates; VM_start; VM_appliance_start] + ) ; ( [("restrict_xen_motion", "false"); ("restrict_dmc", "false")] - , [DMC; Xen_motion; AD; Updates; VM_start] + , [DMC; Xen_motion; AD; Updates; VM_start; VM_appliance_start] ) ; ( [ ("restrict_xen_motion", "false") ; ("restrict_ad", "true") ; ("restrict_dmc", "false") ] - , [DMC; Xen_motion; Updates; VM_start] + , [DMC; Xen_motion; Updates; VM_start; VM_appliance_start] ) ; ( [("enable_xha", "true"); ("restrict_xen_motion", "true")] - , [HA; AD; Updates; VM_start] + , [HA; AD; Updates; VM_start; VM_appliance_start] + ) + ; ( [("restrict_updates", "true")] + , [Xen_motion; AD; VM_start; VM_appliance_start] + ) + ; ( [("restrict_vm_start", "true")] + , [Xen_motion; AD; Updates; VM_appliance_start] ) - ; ([("restrict_updates", "true")], [Xen_motion; AD; VM_start]) - ; ([("restrict_vm_start", "true")], [Xen_motion; AD; Updates]) ] end) diff --git a/ocaml/xapi-types/features.ml b/ocaml/xapi-types/features.ml index ed952c86b8d..c80d3c833a5 100644 --- a/ocaml/xapi-types/features.ml +++ b/ocaml/xapi-types/features.ml @@ -66,6 +66,7 @@ type 
feature = | VTPM | VM_groups | VM_start + | VM_appliance_start [@@deriving rpc] type orientation = Positive | Negative @@ -136,13 +137,15 @@ let keys_of_features = ; (VTPM, ("restrict_vtpm", Negative, "VTPM")) ; (VM_groups, ("restrict_vm_groups", Negative, "VM_groups")) ; (VM_start, ("restrict_vm_start", Negative, "Start")) + ; (VM_appliance_start, ("restrict_vm_appliance_start", Negative, "Start")) ] (* A list of features that must be considered "enabled" by `of_assoc_list` if the feature string is missing from the list. These are existing features that have been recently restricted, and which we want to remain enabled during a rolling pool upgrade. *) -let enabled_when_unknown = [Xen_motion; AD; Updates; VM_start] +let enabled_when_unknown = + [Xen_motion; AD; Updates; VM_start; VM_appliance_start] let name_of_feature f = rpc_of_feature f |> Rpc.string_of_rpc diff --git a/ocaml/xapi-types/features.mli b/ocaml/xapi-types/features.mli index fdad4316d86..f6efce3f0a5 100644 --- a/ocaml/xapi-types/features.mli +++ b/ocaml/xapi-types/features.mli @@ -74,6 +74,7 @@ type feature = | VTPM (** Support VTPM device required by Win11 guests *) | VM_groups (** Enable use of VM groups *) | VM_start (** Allow starting of VMs (!) 
*) + | VM_appliance_start (** Allow starting of VM appliances *) val feature_of_rpc : Rpc.t -> feature (** Convert RPC into {!feature}s *) diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index 81fdf80116d..1b28e0059e4 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -743,6 +743,7 @@ functor let start ~__context ~self ~paused = info "VM_appliance.start: VM_appliance = '%s'" (vm_appliance_uuid ~__context self) ; + Pool_features.assert_enabled ~__context ~f:Features.VM_appliance_start ; with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.start" ~op:`start (fun () -> Local.VM_appliance.start ~__context ~self ~paused From 7143ce81f1f1725f6e574ebb7583bb2905d6a160 Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Wed, 28 Aug 2024 10:08:12 +0000 Subject: [PATCH 259/341] Update datamodel lifecycle Signed-off-by: Rob Hoes --- ocaml/idl/datamodel_lifecycle.ml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ocaml/idl/datamodel_lifecycle.ml b/ocaml/idl/datamodel_lifecycle.ml index 1a101ead83b..f759eeadbdf 100644 --- a/ocaml/idl/datamodel_lifecycle.ml +++ b/ocaml/idl/datamodel_lifecycle.ml @@ -26,7 +26,7 @@ let prototyped_of_field = function | "Observer", "uuid" -> Some "23.14.0" | "Repository", "origin" -> - Some "24.21.0-next" + Some "24.23.0" | "Repository", "gpgkey_path" -> Some "22.12.0" | "Certificate", "fingerprint_sha1" -> @@ -126,7 +126,7 @@ let prototyped_of_message = function | "Repository", "set_gpgkey_path" -> Some "22.12.0" | "Repository", "introduce_bundle" -> - Some "24.21.0-next" + Some "24.23.0" | "PCI", "get_dom0_access_status" -> Some "24.14.0" | "PCI", "enable_dom0_access" -> From f2d3a6ae3fa58fe4e4f71db90f160f77f670941f Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 28 Aug 2024 15:03:47 +0100 Subject: [PATCH 260/341] http-lib: log reason that causes lack of response Otherwise users are nonthewiser when a long migration fails. 
With this change and a reproduction we can have the chance to understand the issue and maybe fix it. Signed-off-by: Pau Ruiz Safont --- ocaml/libs/http-lib/http_client.ml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ocaml/libs/http-lib/http_client.ml b/ocaml/libs/http-lib/http_client.ml index 163cfdd0dde..8e8c5cd2d44 100644 --- a/ocaml/libs/http-lib/http_client.ml +++ b/ocaml/libs/http-lib/http_client.ml @@ -187,7 +187,9 @@ let response_of_fd ?(use_fastpath = false) fd = with | Unix.Unix_error (_, _, _) as e -> raise e - | _ -> + | e -> + D.debug "%s: returning no response because of the exception: %s" + __FUNCTION__ (Printexc.to_string e) ; None (** See perftest/tests.ml *) From fe21d69622327d4fd7238bb490b48d4fa6a4e1f8 Mon Sep 17 00:00:00 2001 From: Mark Date: Tue, 27 Aug 2024 10:42:40 +0100 Subject: [PATCH 261/341] CP-51278: define import_activate datapath operation This is to be used as part of the inbound storage migration process and will make available a UNIX domain socket which can have an open file descriptor passed to it via SCM_RIGHTS. This takes the place of the /run/blktap-control/nbdserver. path used in SXM on SMAPIv1. 
Implementations shall arrange to make available a mechanism whereby an open file descriptor can be passed and then used to connect to an active new-fixed NBD server providing access to the specified [uri] Signed-off-by: Mark --- ocaml/xapi-storage/generator/lib/data.ml | 25 ++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/ocaml/xapi-storage/generator/lib/data.ml b/ocaml/xapi-storage/generator/lib/data.ml index 142848b4d6d..e4571892f71 100644 --- a/ocaml/xapi-storage/generator/lib/data.ml +++ b/ocaml/xapi-storage/generator/lib/data.ml @@ -68,6 +68,14 @@ let domain = ~description:["An opaque string which represents the Xen domain."] domain +(** Path to a UNIX domain socket *) +type sock_path = string [@@deriving rpcty] + +let sock_path = + Param.mk ~name:"sock_path" + ~description:["A path to a UNIX domain socket in the filesystem."] + sock_path + open Idl module Datapath (R : RPC) = struct @@ -132,6 +140,23 @@ module Datapath (R : RPC) = struct ] (dbg @-> uri_p @-> domain @-> returning unit error) + let import_activate = + declare "import_activate" + [ + "[import_activate uri domain] prepares a connection to the " + ; " storage named by [uri] for use by inbound import mirroring, " + ; "the [domain] parameter identifies which domain to connect to, " + ; "most likely 0 or a custom storage domain. The return value is a " + ; "path to a UNIX domain socket to which an open file descriptor " + ; "may be passed, by SCM_RIGHTS. This, in turn, will become " + ; "the server end of a Network Block Device (NBD) connection " + ; "using, new-fixed protocol. Implementations shall declare the " + ; "VDI_MIRROR_IN feature for this method to be supported. It is " + ; "expected that activate will have been previously called so that " + ; "there is an active datapath." 
+ ] + (dbg @-> uri_p @-> domain @-> returning sock_path error) + let deactivate = declare "deactivate" [ From e5088b6984f1541156ab0fe825c4f4861b974686 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Thu, 29 Aug 2024 13:21:13 +0100 Subject: [PATCH 262/341] quicktest: disable open 1024 fds on startup for now MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We've enabled running the unixext tests in quicktest. They open 1024 file descriptors on startup to check for the absence of select. And although that works for unixext (it is select-free), it doesn't yet work for the rest of quicktests on master (the required changes to make it work are on the epoll branch). Temporarily disable this test and add a note to reenable it on the epoll branch. Fixes: efcb7af9d9d2 ("CP-50448: run the QuickCheck tests in QuickTest") Signed-off-by: Edwin Török --- .../libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml index 83bc7f00bd2..7953076844b 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml @@ -286,4 +286,5 @@ let tests = let () = (* avoid SIGPIPE *) let (_ : Sys.signal_behavior) = Sys.signal Sys.sigpipe Sys.Signal_ignore in - Xapi_stdext_unix.Unixext.test_open 1024 + (* TODO: reenable once the epoll branch is merged Xapi_stdext_unix.Unixext.test_open 1024 *) + () From bb0411c2c376f7d8a6ef0b1eee442469c3997174 Mon Sep 17 00:00:00 2001 From: Gang Ji Date: Fri, 30 Aug 2024 14:39:41 +0800 Subject: [PATCH 263/341] Fixup link. 
Signed-off-by: Gang Ji --- doc/content/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/content/_index.md b/doc/content/_index.md index 549b2152ed6..20a6f95a521 100644 --- a/doc/content/_index.md +++ b/doc/content/_index.md @@ -8,7 +8,7 @@ The **XAPI Toolstack**: - Forms the control plane of both [XenServer](http://xenserver.com) as well as [xcp-ng](http://xcp-ng.org), - manages clusters of Xen hosts with shared storage and networking, -- has a full-featured [API](http://xapi-project-github.io/xen-api), used by clients such as +- has a full-featured [API](http://xapi-project.github.io/xen-api), used by clients such as [XenCenter](https://github.com/xenserver/xenadmin) and [Xen Orchestra](https://xen-orchestra.com). The XAPI Toolstack is an open-source project developed by the [xapi From 7d98cc559e3daa0002cb1aae2c7cceb144ebe95d Mon Sep 17 00:00:00 2001 From: Gang Ji Date: Fri, 30 Aug 2024 18:23:04 +0800 Subject: [PATCH 264/341] Update VM failover planning document. Signed-off-by: Gang Ji --- doc/content/toolstack/features/HA/index.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/content/toolstack/features/HA/index.md b/doc/content/toolstack/features/HA/index.md index 45918ac9269..b0db7feac5d 100644 --- a/doc/content/toolstack/features/HA/index.md +++ b/doc/content/toolstack/features/HA/index.md @@ -531,7 +531,7 @@ type ('a, 'b) configuration = { Note that: - the memory required by the VMs listed in `placement` has already been - substracted from the total memory of the hosts; it doesn't need to be + substracted from the free memory of the hosts; it doesn't need to be subtracted again. - the free memory of each host has already had per-host miscellaneous overheads subtracted from it, including that used by unprotected VMs, @@ -551,10 +551,10 @@ sig end ``` -The function `get_specific_plan` takes a configuration and a list of Hosts -which have failed. 
It returns a VM restart plan represented as a VM to Host -association list. This is the function called by the -background HA VM restart thread on the master. +The function `get_specific_plan` takes a configuration and a list of VMs( +the host where they are resident on have failed). It returns a VM restart +plan represented as a VM to Host association list. This is the function +called by the background HA VM restart thread on the master. The function `plan_always_possible` returns true if every sequence of Host failures of length From b60a081d80e22bfa2527a737bf96ec90a377083e Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 2 Sep 2024 14:03:12 +0100 Subject: [PATCH 265/341] xe autocompletion: Only show required/optional prefixes when parameter name is empty Otherwise, these prefixes break autocompletion of common prefixes, like in: ``` $ xe pool-param-get OPTIONAL: database: REQUIRED: param-name= OPTIONAL: param-key= REQUIRED: uuid= $ xe pool-param-get p <- gets autocompleted before showing further suggestions $ xe pool-param-get param- param-key= param-name= ``` Without this fix, instead worked like this: ``` $ xe pool-param-get OPTIONAL: database: REQUIRED: param-name= OPTIONAL: param-key= REQUIRED: uuid= $ xe pool-param-get p <-- no prefix autocompletion OPTIONAL: param-key= REQUIRED: param-name= ``` This is a temporary workaround, since upgrading bash would allow us to show prefixes as section headers without polluting suggested words themselves. Signed-off-by: Andrii Sultanov --- ocaml/xe-cli/bash-completion | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ocaml/xe-cli/bash-completion b/ocaml/xe-cli/bash-completion index 0cf725eb76c..0da9be29c0e 100644 --- a/ocaml/xe-cli/bash-completion +++ b/ocaml/xe-cli/bash-completion @@ -617,7 +617,9 @@ _xe() __xe_debug "triggering autocompletion for parameter names, param is '$param'" IFS=$'\n,' - REQD_OPTIONAL_PARAMS=1 + if [ ! 
"$param" ]; then + REQD_OPTIONAL_PARAMS=1 + fi get_params_for_command "${OLDSTYLE_WORDS[1]}" # Don't suggest already provided parameters From 4594bf2279f09785cf6ac7656b91f7bf387a556b Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Fri, 30 Aug 2024 14:14:39 +0100 Subject: [PATCH 266/341] Introduce mli for xapi_clustering Xapi_clustering is used in many places outside of this module alone, so introduce a mli for it. Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_clustering.mli | 89 ++++++++++++++++++++++++++++++++++ quality-gate.sh | 2 +- 2 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 ocaml/xapi/xapi_clustering.mli diff --git a/ocaml/xapi/xapi_clustering.mli b/ocaml/xapi/xapi_clustering.mli new file mode 100644 index 00000000000..667c004aeed --- /dev/null +++ b/ocaml/xapi/xapi_clustering.mli @@ -0,0 +1,89 @@ +(* Copyright (C) Cloud Software Group Inc. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+*) + +val set_ha_cluster_stack : __context:Context.t -> unit + +val with_clustering_lock : string -> (unit -> 'a) -> 'a + +val pif_of_host : + __context:Context.t -> API.ref_network -> API.ref_host -> 'a Ref.t * API.pIF_t + +val ip_of_pif : 'a Ref.t * API.pIF_t -> Cluster_interface.address + +val assert_pif_prerequisites : 'a Ref.t * API.pIF_t -> unit + +val assert_pif_attached_to : + __context:Context.t -> host:[`host] Ref.t -> pIF:[`PIF] Ref.t -> unit + +val handle_error : Cluster_interface.error -> 'a + +val assert_cluster_host_can_be_created : + __context:Context.t -> host:'a Ref.t -> unit + +val get_required_cluster_stacks : + __context:Context.t -> sr_sm_type:string -> string list + +val assert_cluster_stack_valid : cluster_stack:string -> unit + +val with_clustering_lock_if_needed : + __context:Context.t -> sr_sm_type:string -> string -> (unit -> 'a) -> 'a + +val with_clustering_lock_if_cluster_exists : + __context:Context.t -> string -> (unit -> 'a) -> 'a + +val find_cluster_host : + __context:Context.t -> host:[`host] Ref.t -> 'a Ref.t option + +val get_network_internal : + __context:Context.t -> self:[`Cluster] Ref.t -> [`network] Ref.t + +val assert_cluster_host_enabled : + __context:Context.t -> self:[`Cluster_host] Ref.t -> expected:bool -> unit + +val assert_operation_host_target_is_localhost : + __context:Context.t -> host:[`host] Ref.t -> unit + +val assert_cluster_host_has_no_attached_sr_which_requires_cluster_stack : + __context:Context.t -> self:[`Cluster_host] Ref.t -> unit + +module Daemon : sig + val enable : __context:Context.t -> unit + + val disable : __context:Context.t -> unit + + val restart : __context:Context.t -> unit +end + +val rpc : __context:Context.t -> Rpc.call -> Rpc.response Idl.IdM.t + +val maybe_switch_cluster_stack_version : + __context:Context.t + -> self:'a Ref.t + -> cluster_stack:Cluster_interface.Cluster_stack.t + -> unit + +val assert_cluster_host_is_enabled_for_matching_sms : + __context:Context.t -> host:[`host] 
Ref.t -> sr_sm_type:string -> unit + +val is_clustering_disabled_on_host : + __context:Context.t -> [`host] Ref.t -> bool + +val compute_corosync_max_host_failures : __context:Context.t -> int + +module Watcher : sig + val on_corosync_update : + __context:Context.t -> cluster:[`Cluster] Ref.t -> string list -> unit + + val signal_exit : unit -> unit + + val create_as_necessary : __context:Context.t -> host:[`host] Ref.t -> unit +end diff --git a/quality-gate.sh b/quality-gate.sh index 4c3c1da3a3a..ab741ef3445 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=511 + N=510 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) From 5d15761f8ac3f50e9a3cedb65ca69a4eed73e66d Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Fri, 30 Aug 2024 14:18:09 +0100 Subject: [PATCH 267/341] Make Daemon.enabled as an Atomic.t It seems that `Daemon.enabled` is read and write in many places, make it Atomic to prevent (future) race conditions. 
Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_cluster_host.ml | 2 +- ocaml/xapi/xapi_clustering.ml | 12 +++++++----- ocaml/xapi/xapi_clustering.mli | 2 ++ ocaml/xapi/xapi_observer_components.ml | 4 +++- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/ocaml/xapi/xapi_cluster_host.ml b/ocaml/xapi/xapi_cluster_host.ml index c55d789b8d9..3976d95f9eb 100644 --- a/ocaml/xapi/xapi_cluster_host.ml +++ b/ocaml/xapi/xapi_cluster_host.ml @@ -361,7 +361,7 @@ let enable ~__context ~self = in (* TODO: Pass these through from CLI *) - if not !Xapi_clustering.Daemon.enabled then ( + if not (Xapi_clustering.Daemon.is_enabled ()) then ( D.debug "Cluster_host.enable: xapi-clusterd not running - attempting to start" ; Xapi_clustering.Daemon.enable ~__context diff --git a/ocaml/xapi/xapi_clustering.ml b/ocaml/xapi/xapi_clustering.ml index 249efa74da1..13e03f1ee5d 100644 --- a/ocaml/xapi/xapi_clustering.ml +++ b/ocaml/xapi/xapi_clustering.ml @@ -250,7 +250,9 @@ let assert_cluster_host_has_no_attached_sr_which_requires_cluster_stack raise Api_errors.(Server_error (cluster_stack_in_use, [cluster_stack])) module Daemon = struct - let enabled = ref false + let enabled = Atomic.make false + + let is_enabled () = Atomic.get enabled let maybe_call_script ~__context script params = match Context.get_test_clusterd_rpc __context with @@ -283,13 +285,13 @@ module Daemon = struct (internal_error, [Printf.sprintf "could not start %s" service]) ) ) ; - enabled := true ; + Atomic.set enabled true ; debug "Cluster daemon: enabled & started" let disable ~__context = let port = string_of_int !Xapi_globs.xapi_clusterd_port in debug "Disabling and stopping the clustering daemon" ; - enabled := false ; + Atomic.set enabled false ; maybe_call_script ~__context !Xapi_globs.systemctl ["disable"; service] ; maybe_call_script ~__context !Xapi_globs.systemctl ["stop"; service] ; maybe_call_script ~__context @@ -309,7 +311,7 @@ end * Instead of returning an empty URL which wouldn't work just raise 
an * exception. *) let rpc ~__context = - if not !Daemon.enabled then + if not (Daemon.is_enabled ()) then raise Api_errors.( Server_error @@ -596,7 +598,7 @@ module Watcher = struct Atomic.set cluster_change_watcher false let watch_cluster_stack_version ~__context ~host = - if !Daemon.enabled then + if Daemon.is_enabled () then match find_cluster_host ~__context ~host with | Some ch -> let cluster_ref = Db.Cluster_host.get_cluster ~__context ~self:ch in diff --git a/ocaml/xapi/xapi_clustering.mli b/ocaml/xapi/xapi_clustering.mli index 667c004aeed..7fceae58118 100644 --- a/ocaml/xapi/xapi_clustering.mli +++ b/ocaml/xapi/xapi_clustering.mli @@ -56,6 +56,8 @@ val assert_cluster_host_has_no_attached_sr_which_requires_cluster_stack : __context:Context.t -> self:[`Cluster_host] Ref.t -> unit module Daemon : sig + val is_enabled : unit -> bool + val enable : __context:Context.t -> unit val disable : __context:Context.t -> unit diff --git a/ocaml/xapi/xapi_observer_components.ml b/ocaml/xapi/xapi_observer_components.ml index 797c236b248..d3e0587b143 100644 --- a/ocaml/xapi/xapi_observer_components.ml +++ b/ocaml/xapi/xapi_observer_components.ml @@ -48,7 +48,9 @@ let all = List.map of_string Constants.observer_components_all This does mean that observer will always be enabled for clusterd. 
*) let startup_components () = List.filter - (function Xapi_clusterd -> !Xapi_clustering.Daemon.enabled | _ -> true) + (function + | Xapi_clusterd -> Xapi_clustering.Daemon.is_enabled () | _ -> true + ) all let assert_valid_components components = From 801b08711fc4ff9cf77b67777c6992396043b4ab Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Fri, 30 Aug 2024 14:33:28 +0100 Subject: [PATCH 268/341] CA-398438: Signal exit to the watcher thread This solves the problem where disabling and enabling clustering sometimes makes the thread not start, because it is still in the middle of processing, and by the time it reaches the end and reset the `cluster_change_watcher`, `create_as_necessary` has already be called with `cluster_change_watcher` equal to true. Therefore set `cluster_change_watcher` immediately to false as soon as the cluster is to be destroyed, or the coordinator is to be changed. Note, however, in the case of coorindator change, the watcher will not be created until the new coordinator has restarted. This is ok since the new coordinator will only be picked up on next boot. Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_cluster.ml | 2 ++ ocaml/xapi/xapi_cluster_host.ml | 1 + ocaml/xapi/xapi_clustering.ml | 51 +++++++++++++++++++++++++----- ocaml/xapi/xapi_pool_transition.ml | 2 ++ 4 files changed, 48 insertions(+), 8 deletions(-) diff --git a/ocaml/xapi/xapi_cluster.ml b/ocaml/xapi/xapi_cluster.ml index cfa55fde2c7..355bf175527 100644 --- a/ocaml/xapi/xapi_cluster.ml +++ b/ocaml/xapi/xapi_cluster.ml @@ -126,6 +126,7 @@ let create ~__context ~pIF ~cluster_stack ~pool_auto_join ~token_timeout | Error error -> D.warn "Error occurred during Cluster.create. 
Shutting down cluster daemon" ; + Xapi_clustering.Watcher.signal_exit () ; Xapi_clustering.Daemon.disable ~__context ; handle_error error ) @@ -156,6 +157,7 @@ let destroy ~__context ~self = Db.Cluster.destroy ~__context ~self ; D.debug "Cluster destroyed successfully" ; set_ha_cluster_stack ~__context ; + Xapi_clustering.Watcher.signal_exit () ; Xapi_clustering.Daemon.disable ~__context (* Get pool master's cluster_host, return network of PIF *) diff --git a/ocaml/xapi/xapi_cluster_host.ml b/ocaml/xapi/xapi_cluster_host.ml index 3976d95f9eb..9644ca8cd78 100644 --- a/ocaml/xapi/xapi_cluster_host.ml +++ b/ocaml/xapi/xapi_cluster_host.ml @@ -261,6 +261,7 @@ let destroy_op ~__context ~self ~force = ) ; Db.Cluster_host.destroy ~__context ~self ; debug "Cluster_host.%s was successful" fn_str ; + Xapi_clustering.Watcher.signal_exit () ; Xapi_clustering.Daemon.disable ~__context | Error error -> warn "Error occurred during Cluster_host.%s" fn_str ; diff --git a/ocaml/xapi/xapi_clustering.ml b/ocaml/xapi/xapi_clustering.ml index 13e03f1ee5d..c48b4368f08 100644 --- a/ocaml/xapi/xapi_clustering.ml +++ b/ocaml/xapi/xapi_clustering.ml @@ -429,6 +429,8 @@ let compute_corosync_max_host_failures ~__context = corosync_ha_max_hosts module Watcher = struct + module Delay = Xapi_stdext_threads.Threadext.Delay + let routine_updates = "routine updates" let on_corosync_update ~__context ~cluster updates = @@ -554,14 +556,40 @@ module Watcher = struct from corosync represents a consistent snapshot of the current cluster state. *) let stabilising_period = Mtime.Span.(5 * s) + (* The delay on which the watcher will wait. *) + let delay = Delay.make () + + let finish_watch = Atomic.make false + let cluster_stack_watcher : bool Atomic.t = Atomic.make false + (* This function exists to store the fact that the watcher should be destroyed, + to avoid the race that the cluster is destroyed, while the watcher is + still waiting/stabilising. 
+ + There are two cases this function shall be called: 1. when the clustering + is to be disabled; 2. when this host is no longer the coordinator. For the second + case it is only necessary to do this when there is a manual designation of a new + master since in the case of ha the old coordinator would have died, and so would + this thread on the old coordinator. *) + let signal_exit () = + D.debug "%s: Signaled to exit cluster watcher" __FUNCTION__ ; + Delay.signal delay ; + (* set the cluster change watcher back to false as soon as we are signalled + to prevent any race conditions *) + Atomic.set cluster_change_watcher false ; + D.debug + "%s: watcher for cluster change exit, reset cluster_change_watcher back \ + to false" + __FUNCTION__ ; + Atomic.set finish_watch true + (* we handle unclean hosts join and leave in the watcher, i.e. hosts joining and leaving due to network problems, power cut, etc. Join and leave initiated by the API will be handled in the API call themselves, but they share the same code as the watcher. 
*) let watch_cluster_change ~__context ~host = - while !Daemon.enabled do + while not (Atomic.get finish_watch) do let m = Cluster_client.LocalClient.UPDATES.get (rpc ~__context) "cluster change watcher call" @@ -571,9 +599,13 @@ module Watcher = struct match find_cluster_host ~__context ~host with | Some ch -> let cluster = Db.Cluster_host.get_cluster ~__context ~self:ch in - if wait then - Thread.delay (Clock.Timer.span_to_s stabilising_period) ; - on_corosync_update ~__context ~cluster updates + if not wait then + on_corosync_update ~__context ~cluster updates + else if + wait + && Clock.Timer.span_to_s stabilising_period |> Delay.wait delay + then + on_corosync_update ~__context ~cluster updates | None -> () in @@ -593,9 +625,11 @@ module Watcher = struct | exception exn -> warn "%s: Got exception %s while query cluster host updates, retrying" __FUNCTION__ (Printexc.to_string exn) ; - Thread.delay (Clock.Timer.span_to_s cluster_change_interval) - done ; - Atomic.set cluster_change_watcher false + let _ : bool = + Clock.Timer.span_to_s cluster_change_interval |> Delay.wait delay + in + () + done let watch_cluster_stack_version ~__context ~host = if Daemon.is_enabled () then @@ -637,11 +671,12 @@ module Watcher = struct There is no need to destroy them: once the clustering daemon is disabled, these threads will exit as well. 
*) let create_as_necessary ~__context ~host = - if Helpers.is_pool_master ~__context ~host then ( + if Helpers.is_pool_master ~__context ~host && Daemon.is_enabled () then ( if Xapi_cluster_helpers.cluster_health_enabled ~__context then if Atomic.compare_and_set cluster_change_watcher false true then ( debug "%s: create watcher for corosync-notifyd on coordinator" __FUNCTION__ ; + Atomic.set finish_watch false ; let _ : Thread.t = Thread.create (fun () -> watch_cluster_change ~__context ~host) () in diff --git a/ocaml/xapi/xapi_pool_transition.ml b/ocaml/xapi/xapi_pool_transition.ml index 6ff8f892bd9..8f6a315f591 100644 --- a/ocaml/xapi/xapi_pool_transition.ml +++ b/ocaml/xapi/xapi_pool_transition.ml @@ -215,6 +215,8 @@ let become_another_masters_slave master_address = if Pool_role.get_role () = new_role then debug "We are already a slave of %s; nothing to do" master_address else ( + if Pool_role.is_master () then (* I am the old master *) + Xapi_clustering.Watcher.signal_exit () ; debug "Setting pool.conf to point to %s" master_address ; set_role new_role ; run_external_scripts false ; From 53a8b495048795645e4dc894f94fea46688126a5 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Fri, 30 Aug 2024 14:38:16 +0100 Subject: [PATCH 269/341] Remove the condition check for Daemon.enabled On `cluster_stack_watcher`, as this is already done when creating the thread. 
Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_clustering.ml | 62 +++++++++++++++++------------------ 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/ocaml/xapi/xapi_clustering.ml b/ocaml/xapi/xapi_clustering.ml index c48b4368f08..3db9ad1a703 100644 --- a/ocaml/xapi/xapi_clustering.ml +++ b/ocaml/xapi/xapi_clustering.ml @@ -632,39 +632,39 @@ module Watcher = struct done let watch_cluster_stack_version ~__context ~host = - if Daemon.is_enabled () then - match find_cluster_host ~__context ~host with - | Some ch -> - let cluster_ref = Db.Cluster_host.get_cluster ~__context ~self:ch in - let cluster_rec = - Db.Cluster.get_record ~__context ~self:cluster_ref - in - if - Cluster_stack.of_version - ( cluster_rec.API.cluster_cluster_stack - , cluster_rec.API.cluster_cluster_stack_version - ) - = Cluster_stack.Corosync2 - then ( - debug "%s: Detected Corosync 2 running as cluster stack" - __FUNCTION__ ; - let body = - "The current cluster stack version of Corosync 2 is out of date, \ - consider updating to Corosync 3" - in - let name, priority = Api_messages.cluster_stack_out_of_date in - let host_uuid = Db.Host.get_uuid ~__context ~self:host in - - Helpers.call_api_functions ~__context (fun rpc session_id -> - let _ : [> `message] Ref.t = - Client.Client.Message.create ~rpc ~session_id ~name ~priority - ~cls:`Host ~obj_uuid:host_uuid ~body - in - () + match find_cluster_host ~__context ~host with + | Some ch -> + let cluster_ref = Db.Cluster_host.get_cluster ~__context ~self:ch in + let cluster_rec = Db.Cluster.get_record ~__context ~self:cluster_ref in + if + Cluster_stack.of_version + ( cluster_rec.API.cluster_cluster_stack + , cluster_rec.API.cluster_cluster_stack_version ) + = Cluster_stack.Corosync2 + then ( + debug "%s: Detected Corosync 2 running as cluster stack" __FUNCTION__ ; + let body = + "The current cluster stack version of Corosync 2 is out of date, \ + consider updating to Corosync 3" + in + let name, priority = 
Api_messages.cluster_stack_out_of_date in + let host_uuid = Db.Host.get_uuid ~__context ~self:host in + + Helpers.call_api_functions ~__context (fun rpc session_id -> + let _ : [> `message] Ref.t = + Client.Client.Message.create ~rpc ~session_id ~name ~priority + ~cls:`Host ~obj_uuid:host_uuid ~body + in + () ) - | None -> - debug "%s: No cluster host, no need to watch" __FUNCTION__ + ) else + debug + "%s: Detected Corosync 3 as cluster stack, not generating a \ + warning messsage" + __FUNCTION__ + | None -> + debug "%s: No cluster host, no need to watch" __FUNCTION__ (** [create_as_necessary] will create cluster watchers on the coordinator if they are not already created. From 24b4626912373e9a13ec54ffd1e71350fc4cd19e Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Tue, 3 Sep 2024 11:03:40 +0100 Subject: [PATCH 270/341] Add more debug messages for watcher creation This helps debug why sometimes a watcher is not created. Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_clustering.ml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi/xapi_clustering.ml b/ocaml/xapi/xapi_clustering.ml index 3db9ad1a703..9f21b4c43c4 100644 --- a/ocaml/xapi/xapi_clustering.ml +++ b/ocaml/xapi/xapi_clustering.ml @@ -671,7 +671,9 @@ module Watcher = struct There is no need to destroy them: once the clustering daemon is disabled, these threads will exit as well. 
*) let create_as_necessary ~__context ~host = - if Helpers.is_pool_master ~__context ~host && Daemon.is_enabled () then ( + let is_master = Helpers.is_pool_master ~__context ~host in + let daemon_enabled = Daemon.is_enabled () in + if is_master && daemon_enabled then ( if Xapi_cluster_helpers.cluster_health_enabled ~__context then if Atomic.compare_and_set cluster_change_watcher false true then ( debug "%s: create watcher for corosync-notifyd on coordinator" @@ -703,5 +705,9 @@ module Watcher = struct ) else debug "%s: not create watcher for cluster stack as it already exists" __FUNCTION__ - ) + ) else + debug + "%s not create watcher because we are %b master and clustering is \ + enabled %b " + __FUNCTION__ is_master daemon_enabled end From 911bc2d2fa989fec57914616ae0ade2c44e0220a Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 3 Sep 2024 14:38:01 +0100 Subject: [PATCH 271/341] xe autocompletion: Exclude previously entered parameters before deciding whether to show optionality of the parameters Fixes the bug when optionality of parameters would be shown when all the remaining parameters were of the same category, autocompleting the noise: ``` $ xe vm-param-get OPTIONAL: database: REQUIRED: param-name= OPTIONAL: param-key= REQUIRED: uuid= $ xe vm-param-get param-name=SMTH uuid=SMTH <- automatically completes to $ xe vm-param-get param-name=SMTH uuid=SMTH OPTIONAL: ``` Now works as intended: ``` $ xe vm-param-get param-name=SMTH uuid=SMTH database: param-key= ``` Signed-off-by: Andrii Sultanov --- ocaml/xe-cli/bash-completion | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ocaml/xe-cli/bash-completion b/ocaml/xe-cli/bash-completion index 0da9be29c0e..0c29a5446b9 100644 --- a/ocaml/xe-cli/bash-completion +++ b/ocaml/xe-cli/bash-completion @@ -814,15 +814,14 @@ set_completions() if [[ $REQD_OPTIONAL_PARAMS == 1 ]]; then local reqd_params=$( __preprocess_suggestions "$REQD_PARAMS" ) local opt_params=$( __preprocess_suggestions 
"$OPT_PARAMS" ) + if [[ "$excludes" ]]; then + reqd_params=$(echo "$reqd_params" | eval "grep -v $excludes") + opt_params=$(echo "$opt_params" | eval "grep -v $excludes") + fi if [[ "$reqd_params" && "$opt_params" ]]; then __xe_debug "showing optional/required parameters" SHOW_DESCRIPTION=1 - if [[ "$excludes" ]]; then - reqd_params=$(echo "$reqd_params" | eval "grep -v $excludes") - opt_params=$(echo "$opt_params" | eval "grep -v $excludes") - fi - for word in $reqd_params; do __add_completion "$word" "REQUIRED" "$max_cmd_length" done From 2ba0399052c4daff495976094726bd976c64d0ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 4 Sep 2024 13:28:33 +0100 Subject: [PATCH 272/341] fix(CI): feature/py3 has been merged, refer to master now MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This causes problems with forks that don't have feature/py3. Although would it then compare with the fork's master branch which would still be out-of-date? 
Signed-off-by: Edwin Török --- .pre-commit-config.yaml | 4 ++-- pyproject.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 124645bd875..e8fb2f37e0e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -67,7 +67,7 @@ repos: name: check that the Python3 test suite in passes entry: env PYTHONDEVMODE=yes sh -c 'coverage run && coverage xml && coverage html && coverage report && - diff-cover --ignore-whitespace --compare-branch=origin/feature/py3 + diff-cover --ignore-whitespace --compare-branch=origin/master --show-uncovered --html-report .git/coverage-diff.html --fail-under 50 .git/coverage3.11.xml' require_serial: true @@ -111,7 +111,7 @@ repos: stages: [push] name: check that changes to python3 tree pass pylint entry: diff-quality --violations=pylint - --ignore-whitespace --compare-branch=origin/feature/py3 + --ignore-whitespace --compare-branch=origin/master pass_filenames: false language: python types: [python] diff --git a/pyproject.toml b/pyproject.toml index 55467081438..512eac89030 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -275,7 +275,7 @@ xfail_strict = true # is used to fail tests that are marked as xfail but pass(f [tool.pytype_reporter] -default_branch = "feature/py3" +default_branch = "master" discard_messages_matching = [ "Couldn't import pyi for 'xml.dom.minidom'", "No attribute '.*' on RRDContentHandler", From b935761dc984ac6e0e9ad6435410b17c7e49578f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 4 Sep 2024 15:21:11 +0100 Subject: [PATCH 273/341] fix(WLS): disable non-root unit test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fails when running kojienv from WSL2, but works when running kojienv from an actual Linux. In a WSL2 kojienv /dev/loop0 is world writable (even though the user is not root), whereas in a real kojienv /dev/loop0 is not wordl writable. 
This test was here just to test my assumptions that indeed non-root cannot create block devices, assumptions which are not true in WSL2, so remove this test. In Dom0 we run as root always, so this has no effect on the product. Signed-off-by: Edwin Török --- ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml | 3 --- 1 file changed, 3 deletions(-) diff --git a/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml b/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml index bd8664e9c87..65fb540650b 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-fdcaps/test/test_operations.ml @@ -197,9 +197,6 @@ let test_block () = in if Unix.geteuid () = 0 then run () - else - Alcotest.check_raises "non-root fails to create blockdevice" - (Failure "with_temp_blk") run in test_fd with_make [("read", read_fd); ("write", write_fd); ("lseek", test_lseek)] From d9534cf0fa4dbb4ace6c78c69764d3447898a426 Mon Sep 17 00:00:00 2001 From: Mark Syms Date: Wed, 4 Sep 2024 15:20:44 +0100 Subject: [PATCH 274/341] Update the docs for Volume.compose * Give the parameters meaningful names * Describe host this might be implemented with delta files * Declare that the parent volume should be considered invalid after completion. Signed-off-by: Mark Syms --- ocaml/xapi-storage/generator/lib/control.ml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi-storage/generator/lib/control.ml b/ocaml/xapi-storage/generator/lib/control.ml index e34c0183c36..e7f9274c48a 100644 --- a/ocaml/xapi-storage/generator/lib/control.ml +++ b/ocaml/xapi-storage/generator/lib/control.ml @@ -421,9 +421,15 @@ module Volume (R : RPC) = struct let compose = R.declare "compose" [ - "[compose sr volume1 volume2] layers the updates from [volume2] onto" - ; "[volume1], modifying [volume2]. Implementations shall declare the" - ; "VDI_COMPOSE feature for this method to be supported." 
+ "[compose sr child_volume parent_volume] layers the updates from " + ; "[child_volume] onto [parent_volume], modifying [child_volume]. " + ; "In the case of a delta file format this means updating the " + ; "[child_volume] to have a parent or backing object defined by " + ; "[parent_volume]. Implementations shall declare the VDI_COMPOSE " + ; "feature for this method to be supported. After a successful " + ; "return it should be assumed that the [parent_volume] is no " + ; "longer valid. Calling SR.ls, will return the list of currently " + ; "known, valid, volumes." ] (dbg @-> sr @-> key @-> key2 @-> returning unit errors) From b01cdbeeaf263763bf99b3b110407833f1a9f444 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Thu, 29 Aug 2024 17:07:32 +0100 Subject: [PATCH 275/341] CP-51042: Introduce new SR.scan2 for SMAPI{v1,v2,v3} Add a new API SR.scan2 as a toplevel function which will will be implemented differently on SMAPIv1 and SMAPIv3. But not calling it yet, as that will be done in the following commit. On SMAPIv1, this will be implemented as a scan followed by a stat, which is as implemented before. It is important to keep this ordering because the storage backend relies on the scan call to do the resizing. On SMAPIv3, this is implemented as a stat followed by an ls, and the ls is only called if the SR is healthy, as returned by the stat. This is Ok since on SMAPIv3 SR.stat does the resizing instead. 
Signed-off-by: Vincent Liu --- ocaml/xapi-idl/storage/dune | 2 +- ocaml/xapi-idl/storage/storage_interface.ml | 20 +++++- ocaml/xapi-idl/storage/storage_skeleton.ml | 2 + ocaml/xapi-storage-script/dune | 1 + ocaml/xapi-storage-script/main.ml | 68 +++++++++++++++++++++ ocaml/xapi/storage_mux.ml | 8 +++ ocaml/xapi/storage_smapiv1_wrapper.ml | 14 +++++ ocaml/xapi/xapi_sr.ml | 33 ++++++++++ 8 files changed, 145 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi-idl/storage/dune b/ocaml/xapi-idl/storage/dune index 05f146429bc..036e8dedd89 100644 --- a/ocaml/xapi-idl/storage/dune +++ b/ocaml/xapi-idl/storage/dune @@ -28,7 +28,7 @@ xapi-log ) (wrapped false) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + (preprocess (pps ppx_sexp_conv ppx_deriving_rpc ppx_deriving.show))) (library (name xcp_storage) diff --git a/ocaml/xapi-idl/storage/storage_interface.ml b/ocaml/xapi-idl/storage/storage_interface.ml index 698997ac0cd..02513ae4936 100644 --- a/ocaml/xapi-idl/storage/storage_interface.ml +++ b/ocaml/xapi-idl/storage/storage_interface.ml @@ -233,7 +233,7 @@ let default_vdi_info = failwith (Printf.sprintf "Error creating default_vdi_info: %s" m) type sr_health = Healthy | Recovering | Unreachable | Unavailable -[@@deriving rpcty] +[@@deriving rpcty, show {with_path= false}] type sr_info = { sr_uuid: string option @@ -354,6 +354,7 @@ module Errors = struct | Cancelled of string | Redirect of string option | Sr_attached of string + | Sr_unhealthy of sr_health | Unimplemented of string | Activated_on_another_host of uuid | Duplicated_key of string @@ -617,12 +618,24 @@ module StorageAPI (R : RPC) = struct let destroy = declare "SR.destroy" [] (dbg_p @-> sr_p @-> returning unit_p err) - (** [scan task sr] returns a list of VDIs contained within an attached SR *) + (** [scan task sr] returns a list of VDIs contained within an attached SR. 
+ @deprecated This function is deprecated, and is only here to keep backward + compatibility with old xapis that call Remote.SR.scan during SXM. + Use the scan2 function instead. + *) let scan = let open TypeCombinators in let result = Param.mk ~name:"result" (list vdi_info) in declare "SR.scan" [] (dbg_p @-> sr_p @-> returning result err) + (** [scan2 task sr] returns a list of VDIs contained within an attached SR, + as well as the sr_info of the scanned [sr]. This operation is implemented as + a combination of scan and stats. *) + let scan2 = + let open TypeCombinators in + let result = Param.mk ~name:"result" (pair (list vdi_info, sr_info)) in + declare "SR.scan2" [] (dbg_p @-> sr_p @-> returning result err) + (** [update_snapshot_info_src sr vdi url dest dest_vdi snapshot_pairs] * updates the fields is_a_snapshot, snapshot_time and snapshot_of for a * list of snapshots on a remote SR. *) @@ -1160,6 +1173,8 @@ module type Server_impl = sig val scan : context -> dbg:debug_info -> sr:sr -> vdi_info list + val scan2 : context -> dbg:debug_info -> sr:sr -> vdi_info list * sr_info + val update_snapshot_info_src : context -> dbg:debug_info @@ -1449,6 +1464,7 @@ module Server (Impl : Server_impl) () = struct S.SR.reset (fun dbg sr -> Impl.SR.reset () ~dbg ~sr) ; S.SR.destroy (fun dbg sr -> Impl.SR.destroy () ~dbg ~sr) ; S.SR.scan (fun dbg sr -> Impl.SR.scan () ~dbg ~sr) ; + S.SR.scan2 (fun dbg sr -> Impl.SR.scan2 () ~dbg ~sr) ; S.SR.update_snapshot_info_src (fun dbg sr vdi url dest dest_vdi snapshot_pairs verify_dest -> Impl.SR.update_snapshot_info_src () ~dbg ~sr ~vdi ~url ~dest ~dest_vdi diff --git a/ocaml/xapi-idl/storage/storage_skeleton.ml b/ocaml/xapi-idl/storage/storage_skeleton.ml index e91246b3146..25283ed473b 100644 --- a/ocaml/xapi-idl/storage/storage_skeleton.ml +++ b/ocaml/xapi-idl/storage/storage_skeleton.ml @@ -68,6 +68,8 @@ module SR = struct let scan ctx ~dbg ~sr = u "SR.scan" + let scan2 ctx ~dbg ~sr = u "SR.scan2" + let update_snapshot_info_src ctx 
~dbg ~sr ~vdi ~url ~dest ~dest_vdi ~snapshot_pairs = u "SR.update_snapshot_info_src" diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index e27762a2963..5fa4f0f28fa 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -13,6 +13,7 @@ message-switch-async message-switch-unix + ppx_deriving.runtime result rpclib.core rpclib.json diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 98c46440249..196e3edfc07 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1207,6 +1207,74 @@ let bind ~volume_script_dir = |> wrap in S.SR.scan sr_scan_impl ; + let sr_scan2_impl dbg sr = + Attached_SRs.find sr + >>>= (fun sr -> + return_volume_rpc (fun () -> Sr_client.stat (volume_rpc ~dbg) dbg sr) + >>>= fun response -> + Deferred.Result.return + { + Storage_interface.sr_uuid= response.Xapi_storage.Control.uuid + ; name_label= response.Xapi_storage.Control.name + ; name_description= response.Xapi_storage.Control.description + ; total_space= response.Xapi_storage.Control.total_space + ; free_space= response.Xapi_storage.Control.free_space + ; clustered= response.Xapi_storage.Control.clustered + ; health= + ( match response.Xapi_storage.Control.health with + | Xapi_storage.Control.Healthy _ -> + Healthy + | Xapi_storage.Control.Recovering _ -> + Recovering + | Xapi_storage.Control.Unreachable _ -> + Unreachable + | Xapi_storage.Control.Unavailable _ -> + Unavailable + ) + } + >>>= fun sr_info -> + match sr_info.health with + | Healthy -> + return_volume_rpc (fun () -> + Sr_client.ls + (volume_rpc ~dbg ~compat_out:Compat.compat_out_volumes) + dbg sr + ) + >>>= fun response -> + let response = Array.to_list response in + (* Filter out volumes which are clone-on-boot transients *) + let transients = + List.fold + ~f:(fun set x -> + match + List.Assoc.find x.Xapi_storage.Control.keys + _clone_on_boot_key ~equal:String.equal + with + | None -> + set + | Some 
transient -> + Set.add set transient + ) + ~init:Core.String.Set.empty response + in + let response = + List.filter + ~f:(fun x -> + not (Set.mem transients x.Xapi_storage.Control.key) + ) + response + in + Deferred.Result.return + (List.map ~f:vdi_of_volume response, sr_info) + | health -> + debug "%s: sr unhealthy %s" __FUNCTION__ + (Storage_interface.show_sr_health health) ; + Deferred.Result.fail + Storage_interface.(Errors.Sr_unhealthy health) + ) + |> wrap + in + S.SR.scan2 sr_scan2_impl ; let vdi_create_impl dbg sr (vdi_info : Storage_interface.vdi_info) = Attached_SRs.find sr >>>= (fun sr -> diff --git a/ocaml/xapi/storage_mux.ml b/ocaml/xapi/storage_mux.ml index 3a11ad0077f..b14476a3d9d 100644 --- a/ocaml/xapi/storage_mux.ml +++ b/ocaml/xapi/storage_mux.ml @@ -348,6 +348,14 @@ module Mux = struct end)) in C.SR.stat (Debug_info.to_string di) sr + let scan2 () ~dbg ~sr = + with_dbg ~name:"SR.scan2" ~dbg @@ fun di -> + info "SR.scan2 dbg:%s sr:%s" dbg (s_of_sr sr) ; + let module C = StorageAPI (Idl.Exn.GenClient (struct + let rpc = of_sr sr + end)) in + C.SR.scan2 (Debug_info.to_string di) sr + let scan () ~dbg ~sr = with_dbg ~name:"SR.scan" ~dbg @@ fun di -> info "SR.scan dbg:%s sr:%s" dbg (s_of_sr sr) ; diff --git a/ocaml/xapi/storage_smapiv1_wrapper.ml b/ocaml/xapi/storage_smapiv1_wrapper.ml index 04d0e99ecf8..2efa9194168 100644 --- a/ocaml/xapi/storage_smapiv1_wrapper.ml +++ b/ocaml/xapi/storage_smapiv1_wrapper.ml @@ -1210,6 +1210,20 @@ functor Impl.SR.scan context ~dbg ~sr ) + let scan2 context ~dbg ~sr = + with_dbg ~name:"SR.scan2" ~dbg @@ fun di -> + info "SR.scan2 dbg:%s sr:%s" di.log (s_of_sr sr) ; + let dbg = Debug_info.to_string di in + with_sr sr (fun () -> + match Host.find sr !Host.host with + | None -> + raise (Storage_error (Sr_not_attached (s_of_sr sr))) + | Some _ -> + let vs = Impl.SR.scan context ~dbg ~sr in + let sr_info = Impl.SR.stat context ~dbg ~sr in + (vs, sr_info) + ) + let create context ~dbg ~sr ~name_label ~name_description 
~device_config ~physical_size = with_dbg ~name:"SR.create" ~dbg @@ fun di -> diff --git a/ocaml/xapi/xapi_sr.ml b/ocaml/xapi/xapi_sr.ml index 7b5186d5195..5856cab4ad3 100644 --- a/ocaml/xapi/xapi_sr.ml +++ b/ocaml/xapi/xapi_sr.ml @@ -776,6 +776,39 @@ let update_vdis ~__context ~sr db_vdis vdi_infos = ) to_update +let scan2 ~__context ~sr = + let open Storage_access in + let task = Context.get_task_id __context in + let module C = Storage_interface.StorageAPI (Idl.Exn.GenClient (struct + let rpc = rpc + end)) in + let sr' = Ref.string_of sr in + SRScanThrottle.execute (fun () -> + transform_storage_exn (fun () -> + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + let vs, sr_info = + C.SR.scan2 (Ref.string_of task) + (Storage_interface.Sr.of_string sr_uuid) + in + let db_vdis = + Db.VDI.get_records_where ~__context + ~expr:(Eq (Field "SR", Literal sr')) + in + update_vdis ~__context ~sr db_vdis vs ; + let virtual_allocation = + List.fold_left Int64.add 0L + (List.map (fun v -> v.Storage_interface.virtual_size) vs) + in + Db.SR.set_virtual_allocation ~__context ~self:sr + ~value:virtual_allocation ; + Db.SR.set_physical_size ~__context ~self:sr ~value:sr_info.total_space ; + Db.SR.set_physical_utilisation ~__context ~self:sr + ~value:(Int64.sub sr_info.total_space sr_info.free_space) ; + Db.SR.remove_from_other_config ~__context ~self:sr ~key:"dirty" ; + Db.SR.set_clustered ~__context ~self:sr ~value:sr_info.clustered + ) + ) + (* Perform a scan of this locally-attached SR *) let scan ~__context ~sr = let open Storage_access in From 23c4a0c5b16b6f0bc4e909c90171bc39b3860d6d Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Thu, 29 Aug 2024 17:13:41 +0100 Subject: [PATCH 276/341] Replace Xapi_sr.scan with Xapi_sr.scan2 In other words rename scan2 to scan and delete the original scan implementation. This is fine since the function signature remains the same, but internally Xapi_sr.scan will now call SR.scan2 rather than SR.scan. 
scans in other places, such as storage_mux.ml are left untouched for backwards compatability. Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_sr.ml | 41 ++--------------------------------------- 1 file changed, 2 insertions(+), 39 deletions(-) diff --git a/ocaml/xapi/xapi_sr.ml b/ocaml/xapi/xapi_sr.ml index 5856cab4ad3..fc7e5dd768a 100644 --- a/ocaml/xapi/xapi_sr.ml +++ b/ocaml/xapi/xapi_sr.ml @@ -776,39 +776,6 @@ let update_vdis ~__context ~sr db_vdis vdi_infos = ) to_update -let scan2 ~__context ~sr = - let open Storage_access in - let task = Context.get_task_id __context in - let module C = Storage_interface.StorageAPI (Idl.Exn.GenClient (struct - let rpc = rpc - end)) in - let sr' = Ref.string_of sr in - SRScanThrottle.execute (fun () -> - transform_storage_exn (fun () -> - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - let vs, sr_info = - C.SR.scan2 (Ref.string_of task) - (Storage_interface.Sr.of_string sr_uuid) - in - let db_vdis = - Db.VDI.get_records_where ~__context - ~expr:(Eq (Field "SR", Literal sr')) - in - update_vdis ~__context ~sr db_vdis vs ; - let virtual_allocation = - List.fold_left Int64.add 0L - (List.map (fun v -> v.Storage_interface.virtual_size) vs) - in - Db.SR.set_virtual_allocation ~__context ~self:sr - ~value:virtual_allocation ; - Db.SR.set_physical_size ~__context ~self:sr ~value:sr_info.total_space ; - Db.SR.set_physical_utilisation ~__context ~self:sr - ~value:(Int64.sub sr_info.total_space sr_info.free_space) ; - Db.SR.remove_from_other_config ~__context ~self:sr ~key:"dirty" ; - Db.SR.set_clustered ~__context ~self:sr ~value:sr_info.clustered - ) - ) - (* Perform a scan of this locally-attached SR *) let scan ~__context ~sr = let open Storage_access in @@ -820,8 +787,8 @@ let scan ~__context ~sr = SRScanThrottle.execute (fun () -> transform_storage_exn (fun () -> let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - let vs = - C.SR.scan (Ref.string_of task) + let vs, sr_info = + C.SR.scan2 (Ref.string_of task) 
(Storage_interface.Sr.of_string sr_uuid) in let db_vdis = @@ -829,10 +796,6 @@ let scan ~__context ~sr = ~expr:(Eq (Field "SR", Literal sr')) in update_vdis ~__context ~sr db_vdis vs ; - let sr_info = - C.SR.stat (Ref.string_of task) - (Storage_interface.Sr.of_string sr_uuid) - in let virtual_allocation = List.fold_left Int64.add 0L (List.map (fun v -> v.Storage_interface.virtual_size) vs) From 1c3721967bdc35c510f694dbfc7ede4855d807e4 Mon Sep 17 00:00:00 2001 From: Colin James Date: Wed, 4 Sep 2024 12:47:53 +0100 Subject: [PATCH 277/341] CP-50422: Destroy authentication cache in disable_external_auth Upon successfully disabling external authentication (e.g. winbind), any extant external authentication cache is cleared (as to not undermine disabling of the external authentication route). We consider invalidating the cache as sufficient for our needs because external authentication would need to succeed for the cache to become populated again (even if it is recreated due to being enabled as a feature generally). Signed-off-by: Colin James --- ocaml/xapi/xapi_host.ml | 5 +++++ ocaml/xapi/xapi_session.ml | 6 ++++++ ocaml/xapi/xapi_session.mli | 2 ++ 3 files changed, 13 insertions(+) diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index 05955958813..555cb3bb67c 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -1928,6 +1928,11 @@ let disable_external_auth_common ?(during_pool_eject = false) ~__context ~host (* succeeds because there's no need to initialize anymore *) + (* If any cache is present, clear it in order to ensure cached + logins don't persist after disabling external + authentication. *) + Xapi_session.clear_external_auth_cache () ; + (* 3. 
CP-703: we always revalidate all sessions after the external authentication has been disabled *) (* so that all sessions that were externally authenticated will be destroyed *) debug diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 802013ed326..43564278dda 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -838,8 +838,14 @@ module Caching = struct | Some prev_result -> prev_result ) + + let clear_cache () = + let@ () = with_lock lock in + cache := None end +let clear_external_auth_cache = Caching.clear_cache + (* CP-714: Modify session.login_with_password to first try local super-user login; and then call into external auth plugin if this is enabled 1. If the pool master's Host.external_auth_type field is not none, then the diff --git a/ocaml/xapi/xapi_session.mli b/ocaml/xapi/xapi_session.mli index 2dc98429f3e..853284f3c4c 100644 --- a/ocaml/xapi/xapi_session.mli +++ b/ocaml/xapi/xapi_session.mli @@ -110,3 +110,5 @@ val get_total_sessions : unit -> Int64.t val set_local_auth_max_threads : int64 -> unit val set_ext_auth_max_threads : int64 -> unit + +val clear_external_auth_cache : unit -> unit From f25818925134db9ec837ef46d9fbf0b1fb6d2360 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 3 Sep 2024 13:12:10 +0100 Subject: [PATCH 278/341] CP-32625: xenops-cli - replace handwritten JSON prettifier with yojson Previously, the output of `xenops-cli diagnostics` was ill-formed JSON, as keys weren't quoted strings and objects weren't separated by commas. Just use yojson.prettify instead. 
Before: ``` $ xenops-cli diagnostics { vm_actions: {} tasks: [] updates: { updates: [] barriers: [] } scheduler: [] workers: [ { state: Idle } { state: Idle } { state: Idle } ] queues: [] } ``` After: ``` $ xenops-cli diagnostics { "vm_actions": {}, "tasks": [], "updates": { "updates": [], "barriers": [] }, "scheduler": [], "workers": [ { "state": "Idle" }, { "state": "Idle" }, { "state": "Idle" } ], "queues": [] } ``` Nothing seems to rely on parsing the output of xenops-cli, so this should be safe. Signed-off-by: Andrii Sultanov --- ocaml/xenopsd/cli/dune | 3 +- ocaml/xenopsd/cli/xn.ml | 66 ++--------------------------------------- 2 files changed, 5 insertions(+), 64 deletions(-) diff --git a/ocaml/xenopsd/cli/dune b/ocaml/xenopsd/cli/dune index 0b2e0f0c2cf..f4cf59242c1 100644 --- a/ocaml/xenopsd/cli/dune +++ b/ocaml/xenopsd/cli/dune @@ -8,7 +8,7 @@ (libraries astring cmdliner - + re result rpclib.core @@ -22,6 +22,7 @@ xapi-idl.xen.interface xapi-idl.xen.interface.types xapi-stdext-pervasives + yojson ) (preprocess (per_module ((pps ppx_deriving_rpc) Common Xn_cfg_types))) ) diff --git a/ocaml/xenopsd/cli/xn.ml b/ocaml/xenopsd/cli/xn.ml index 9658650699f..811b004bdc3 100644 --- a/ocaml/xenopsd/cli/xn.ml +++ b/ocaml/xenopsd/cli/xn.ml @@ -701,70 +701,10 @@ let list_compact () = let list copts = diagnose_error (if copts.Common.verbose then list_verbose else list_compact) -type t = Line of string | Block of t list - -let pp x = - let open Rpc in - let rec to_string_list = function - | Line x -> - [x] - | Block xs -> - let xs' = List.map to_string_list xs |> List.concat in - List.map (fun x -> " " ^ x) xs' - in - let flatten xs = - let rec aux line = function - | Line x :: xs -> - aux (if line <> "" then line ^ " " ^ x else x) xs - | Block x :: xs -> - (if line <> "" then [Line line] else []) - @ [Block (aux "" x)] - @ aux "" xs - | [] -> - if line <> "" then [Line line] else [] - in - aux "" xs - in - let rec to_t = function - | Int32 x -> - [Line (Printf.sprintf 
"%d" (Int32.to_int x))] - | Int x -> - [Line (Printf.sprintf "%Ld" x)] - | Bool x -> - [Line (Printf.sprintf "%b" x)] - | Float x -> - [Line (Printf.sprintf "%g" x)] - | String x -> - [Line x] - | DateTime x -> - [Line x] - | Enum [] -> - [Line "[]"] - | Enum xs -> - [Line "["; Block (List.concat (List.map to_t xs)); Line "]"] - | Dict [] -> - [Line "{}"] - | Dict xs -> - [ - Line "{" - ; Block - (List.concat (List.map (fun (s, t) -> Line (s ^ ": ") :: to_t t) xs)) - ; Line "}" - ] - | Base64 x -> - [Line x] - | Null -> - [] - in - x - |> to_t - |> flatten - |> List.map to_string_list - |> List.concat - |> List.iter (Printf.printf "%s\n") - let diagnostics' () = - Client.get_diagnostics dbg () |> Jsonrpc.of_string |> pp ; + Client.get_diagnostics dbg () + |> Yojson.Safe.prettify ~std:true + |> print_endline ; `Ok () let stat_vm _ id = From 41bde423ab4c02f5e96be6b68e6bf7241bfd4564 Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Thu, 5 Sep 2024 20:20:55 +0100 Subject: [PATCH 279/341] Minor doc corrections. Signed-off-by: Konstantina Chremmou --- ocaml/doc/wire-protocol.md | 4 ++-- ocaml/idl/templates/toc.mustache | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ocaml/doc/wire-protocol.md b/ocaml/doc/wire-protocol.md index cc5734e76b8..155a27b23e0 100644 --- a/ocaml/doc/wire-protocol.md +++ b/ocaml/doc/wire-protocol.md @@ -371,9 +371,9 @@ should not assume that references generated during one session are valid for any future session. References do not allow objects to be compared for equality. Two references to the same object are not guaranteed to be textually identical. -UUIDs are intended to be permanent names for objects. They are +UUIDs are intended to be permanent identifiers for objects. They are guaranteed to be in the OSF DCE UUID presentation format (as output by `uuidgen`). 
-Clients may store UUIDs on disk and use them to lookup objects in subsequent sessions +Clients may store UUIDs on disk and use them to look up objects in subsequent sessions with the server. Clients may also test equality on objects by comparing UUID strings. The API provides mechanisms for translating between UUIDs and opaque references. diff --git a/ocaml/idl/templates/toc.mustache b/ocaml/idl/templates/toc.mustache index 4b58953b6e7..126bf2922e6 100644 --- a/ocaml/idl/templates/toc.mustache +++ b/ocaml/idl/templates/toc.mustache @@ -8,7 +8,7 @@ - title: Types url: @root@management-api/types.html {{#classes}} - - title: Class:{{{name}}} + - title: "Class: {{{name}}}" url: @root@management-api/class-{{{name_lower}}}.html {{/classes}} - title: Error Handling From 5927541a650a8bb43a3dc36387fba833924fe307 Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Tue, 7 May 2024 20:15:31 +0100 Subject: [PATCH 280/341] Removed entries that don't correspond to API messages. Removed obsolete parsing for CSLG failures. Signed-off-by: Konstantina Chremmou --- ocaml/sdk-gen/csharp/FriendlyErrorNames.resx | 10 ---- ocaml/sdk-gen/csharp/autogen/src/Failure.cs | 60 ++------------------ 2 files changed, 4 insertions(+), 66 deletions(-) diff --git a/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx b/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx index 737b0a20d65..7562889272e 100644 --- a/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx +++ b/ocaml/sdk-gen/csharp/FriendlyErrorNames.resx @@ -354,16 +354,6 @@ Your current role is not authorized to perform this action. Action: {0} - - - Your current role is not authorized to perform this action. -Current Role: {0} -Authorized Roles: {1} - - - Your current role is not authorized to perform this action on {2}. 
-Current Role: {0} -Authorized Roles: {1} Cannot restore on this server because it was saved on an incompatible version diff --git a/ocaml/sdk-gen/csharp/autogen/src/Failure.cs b/ocaml/sdk-gen/csharp/autogen/src/Failure.cs index b877dfa6de1..923c5488d4e 100644 --- a/ocaml/sdk-gen/csharp/autogen/src/Failure.cs +++ b/ocaml/sdk-gen/csharp/autogen/src/Failure.cs @@ -1,18 +1,18 @@ /* * Copyright (c) Cloud Software Group, Inc. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1) Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * + * * 2) Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS @@ -141,7 +141,6 @@ where trimmed.Length > 0 //call these before setting the shortError because they modify the errorText ParseSmapiV3Failures(); - ParseCslgFailures(); try { @@ -181,57 +180,6 @@ private void ParseSmapiV3Failures() } } - /// - /// The ErrorDescription[2] of Cslg failures contains embedded xml. - /// This method parses it and copies the user friendly part to errorText. - /// - private void ParseCslgFailures() - { - /* ErrorDescription[2] example: - - - Host ivory has not yet been added to the service. [err=Object was not found] - - 6 - - Host ivory has not yet been added to the service. 
[err=Object was not found] - 2 - CXSSHostUtil::getHost - 113 - .\\xss_util_host.cpp - - - */ - - if (ErrorDescription.Count > 2 && ErrorDescription[2] != null && ErrorDescription[0] != null && ErrorDescription[0].StartsWith("SR_BACKEND_FAILURE")) - { - Match m = Regex.Match(ErrorDescription[2], @".*", RegexOptions.Singleline); - - if (m.Success) - { - XmlDocument doc = new XmlDocument(); - - try - { - doc.LoadXml(m.Value); - } - catch (XmlException) - { - return; - } - - XmlNodeList nodes = doc.SelectNodes("/StorageLinkServiceError/Fault"); - - if (nodes != null && nodes.Count > 0 && !string.IsNullOrEmpty(nodes[0].InnerText)) - { - errorText = string.IsNullOrEmpty(errorText) - ? nodes[0].InnerText - : string.Format("{0} ({1})", errorText, nodes[0].InnerText); - } - } - } - } - public override string ToString() { return Message; From a7c6f92593c01d6ecbf3b32e036b3ee4670dcbb8 Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Thu, 5 Sep 2024 20:22:24 +0100 Subject: [PATCH 281/341] C SDK: curl flags are not needed since the SDK does not depend on curl. 
Signed-off-by: Konstantina Chremmou --- ocaml/sdk-gen/c/templates/Makefile.mustache | 2 -- 1 file changed, 2 deletions(-) diff --git a/ocaml/sdk-gen/c/templates/Makefile.mustache b/ocaml/sdk-gen/c/templates/Makefile.mustache index ac78e5ca1e6..939b54ad565 100644 --- a/ocaml/sdk-gen/c/templates/Makefile.mustache +++ b/ocaml/sdk-gen/c/templates/Makefile.mustache @@ -40,11 +40,9 @@ endif CFLAGS = -g -Iinclude \ $(shell xml2-config --cflags) \ - $(shell curl-config --cflags) \ -W -Wall -Wmissing-prototypes -Werror -std=c99 $(POS_FLAG) LDFLAGS = -g $(shell xml2-config --libs) \ - $(shell curl-config --libs) \ -Wl,-rpath,$(shell pwd) $(CYGWIN_LIBXML) # -h for Solaris From 0c1a5d12d0faf644e163a165b15999009fbebd7c Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 3 Sep 2024 15:15:58 +0100 Subject: [PATCH 282/341] IH-666: Report guest AD domain name and host name in the API Exposes one of the keys reported by the Windows Guest Agent through VM_guest_metrics. This is designed to make it easier for admins to match up the AD hostname to a VM object - and CVAD uses the FQDN (host name + domain) in their UI, so this is what's exposed. 
``` $ xenstore-read /local/domain/10/data/domain eng.citrite.net $ xenstore-read /local/domain/10/data/host_name_dns WINDOWS-3LIPCLJ $ xe vm-param-get uuid=WIN_UUID param-name=netbios-name host_name: WINDOWS-3LIPCLJ.eng.citrite.net ``` Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel.ml | 4 ++++ ocaml/idl/datamodel_lifecycle.ml | 2 ++ ocaml/idl/schematest.ml | 2 +- ocaml/xapi-cli-server/records.ml | 12 ++++++++++++ ocaml/xapi/import.ml | 1 + ocaml/xapi/xapi_guest_agent.ml | 23 ++++++++++++++++++++++- ocaml/xapi/xapi_vm_helpers.ml | 1 + 7 files changed, 43 insertions(+), 2 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 580ca92ddbb..4c014ca939c 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -5173,6 +5173,10 @@ module VM_guest_metrics = struct ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) "os_version" "version of the OS" + ; field ~qualifier:DynamicRO + ~ty:(Map (String, String)) + ~lifecycle:[] "netbios_name" "The NETBIOS name of the machine" + ~default_value:(Some (VMap [])) ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) "PV_drivers_version" "version of the PV drivers" diff --git a/ocaml/idl/datamodel_lifecycle.ml b/ocaml/idl/datamodel_lifecycle.ml index f759eeadbdf..fd2dee68944 100644 --- a/ocaml/idl/datamodel_lifecycle.ml +++ b/ocaml/idl/datamodel_lifecycle.ml @@ -67,6 +67,8 @@ let prototyped_of_field = function Some "22.27.0" | "host", "last_software_update" -> Some "22.20.0" + | "VM_guest_metrics", "netbios_name" -> + Some "24.27.0-next" | "VM", "groups" -> Some "24.19.1" | "VM", "pending_guidances_full" -> diff --git a/ocaml/idl/schematest.ml b/ocaml/idl/schematest.ml index e81d05ee0ab..9b25ca48aee 100644 --- a/ocaml/idl/schematest.ml +++ b/ocaml/idl/schematest.ml @@ -3,7 +3,7 @@ let hash x = Digest.string x |> Digest.to_hex (* BEWARE: if this changes, check that schema has been bumped accordingly in ocaml/idl/datamodel_common.ml, usually schema_minor_vsn *) -let 
last_known_schema_hash = "4417b0087b481c3038e73f170b7d4d01" +let last_known_schema_hash = "ce370e3b85178acfbcfce4963c4f8534" let current_schema_hash : string = let open Datamodel_types in diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 426b04b758b..697d0fb5d19 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -2301,6 +2301,18 @@ let vm_record rpc session_id vm = (xgm ()) ) () + ; make_field ~name:"netbios-name" + ~get:(fun () -> + Option.fold ~none:nid + ~some:(fun m -> get_from_map m.API.vM_guest_metrics_netbios_name) + (xgm ()) + ) + ~get_map:(fun () -> + Option.fold ~none:[] + ~some:(fun m -> m.API.vM_guest_metrics_netbios_name) + (xgm ()) + ) + () ; make_field ~name:"PV-drivers-version" ~get:(fun () -> Option.fold ~none:nid diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml index bc9d3e1db0b..7b617adcb26 100644 --- a/ocaml/xapi/import.ml +++ b/ocaml/xapi/import.ml @@ -788,6 +788,7 @@ module GuestMetrics : HandlerTools = struct Db.VM_guest_metrics.create ~__context ~ref:gm ~uuid:(Uuidx.to_string (Uuidx.make ())) ~os_version:gm_record.API.vM_guest_metrics_os_version + ~netbios_name:gm_record.API.vM_guest_metrics_netbios_name ~pV_drivers_version:gm_record.API.vM_guest_metrics_PV_drivers_version ~pV_drivers_up_to_date: gm_record.API.vM_guest_metrics_PV_drivers_up_to_date diff --git a/ocaml/xapi/xapi_guest_agent.ml b/ocaml/xapi/xapi_guest_agent.ml index ffe5b8ae618..ffde9e7bf77 100644 --- a/ocaml/xapi/xapi_guest_agent.ml +++ b/ocaml/xapi/xapi_guest_agent.ml @@ -68,6 +68,10 @@ let os_version = ("attr/os/spminor", "spminor") (* windows *) ] +let netbios_name = [("data/host_name_dns", "host_name")] + +let dns_domain = [("data/domain", "dns_domain")] + let memory = [("data/meminfo_free", "free"); ("data/meminfo_total", "total")] let device_id = [("data/device_id", "device_id")] @@ -215,6 +219,7 @@ type m = (string * string) list type guest_metrics_t = { pv_drivers_version: m ; os_version: 
m + ; netbios_name: m ; networks: m ; other: m ; memory: m @@ -269,6 +274,14 @@ let get_initial_guest_metrics (lookup : string -> string option) in let pv_drivers_version = to_map pv_drivers_version and os_version = to_map os_version + and netbios_name = + match to_map dns_domain with + | [] -> + to_map netbios_name + | (_, dns_domain) :: _ -> + List.map + (fun (k, v) -> (k, Printf.sprintf "%s.%s" v dns_domain)) + (to_map netbios_name) and device_id = to_map device_id and networks = to_map @@ -294,6 +307,7 @@ let get_initial_guest_metrics (lookup : string -> string option) { pv_drivers_version ; os_version + ; netbios_name ; networks ; other ; memory @@ -311,7 +325,7 @@ let create_and_set_guest_metrics (lookup : string -> string option) let new_gm_uuid = Uuidx.to_string (Uuidx.make ()) and new_gm_ref = Ref.make () in Db.VM_guest_metrics.create ~__context ~ref:new_gm_ref ~uuid:new_gm_uuid - ~os_version:initial_gm.os_version + ~os_version:initial_gm.os_version ~netbios_name:initial_gm.netbios_name ~pV_drivers_version:initial_gm.pv_drivers_version ~pV_drivers_up_to_date:pV_drivers_detected ~memory:[] ~disks:[] ~networks:initial_gm.networks ~pV_drivers_detected ~other:initial_gm.other @@ -339,6 +353,7 @@ let all (lookup : string -> string option) (list : string -> string list) let { pv_drivers_version ; os_version + ; netbios_name ; networks ; other ; memory @@ -372,6 +387,7 @@ let all (lookup : string -> string option) (list : string -> string list) { pv_drivers_version= [] ; os_version= [] + ; netbios_name= [] ; networks= [] ; other= [] ; memory= [] @@ -388,6 +404,7 @@ let all (lookup : string -> string option) (list : string -> string list) { pv_drivers_version ; os_version + ; netbios_name ; networks ; other ; memory @@ -401,6 +418,7 @@ let all (lookup : string -> string option) (list : string -> string list) if (guest_metrics_cached.pv_drivers_version <> pv_drivers_version || guest_metrics_cached.os_version <> os_version + || guest_metrics_cached.netbios_name <> 
netbios_name || guest_metrics_cached.networks <> networks || guest_metrics_cached.other <> other || guest_metrics_cached.device_id <> device_id @@ -431,6 +449,9 @@ let all (lookup : string -> string option) (list : string -> string list) ~value:pv_drivers_version ; if guest_metrics_cached.os_version <> os_version then Db.VM_guest_metrics.set_os_version ~__context ~self:gm ~value:os_version ; + if guest_metrics_cached.netbios_name <> netbios_name then + Db.VM_guest_metrics.set_netbios_name ~__context ~self:gm + ~value:netbios_name ; if guest_metrics_cached.networks <> networks then Db.VM_guest_metrics.set_networks ~__context ~self:gm ~value:networks ; if guest_metrics_cached.other <> other then ( diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml index 88590dc195b..1c295235b3c 100644 --- a/ocaml/xapi/xapi_vm_helpers.ml +++ b/ocaml/xapi/xapi_vm_helpers.ml @@ -1461,6 +1461,7 @@ let copy_guest_metrics ~__context ~vm = Db.VM_guest_metrics.create ~__context ~ref ~uuid:(Uuidx.to_string (Uuidx.make ())) ~os_version:all.API.vM_guest_metrics_os_version + ~netbios_name:all.API.vM_guest_metrics_netbios_name ~pV_drivers_version:all.API.vM_guest_metrics_PV_drivers_version ~pV_drivers_up_to_date:all.API.vM_guest_metrics_PV_drivers_up_to_date ~memory:all.API.vM_guest_metrics_memory From e40fe60d32d5c893304b23fd73b296ad2ec5576d Mon Sep 17 00:00:00 2001 From: Gang Ji Date: Tue, 25 Jun 2024 15:43:45 +0800 Subject: [PATCH 283/341] CP-47617: Expose backwards compat info to update packaging tooling The backwards compatibility info in xapi is the "api_version_major" and "api_version_minor". This info needs to be exposed to update packing tooling so that it can add the backwards compatibility data into update's metadata. 1. 
The defintions of api_version_major and api_version_minor are moved to xapi.spec, the file ocaml/idl/api_version.ml will be regenerated from api_version.ml.in with value of api_version_major and api_version_minor defined in xapi.spec by configure.ml during koji build. 2. Add ocaml/idl/api_version.ml only for buildng xapi with make. Signed-off-by: Gang Ji --- configure.ml | 18 +++++++++++++++++- ocaml/idl/api_version.ml | 22 ++++++++++++++++++++++ ocaml/idl/api_version.ml.in | 17 +++++++++++++++++ ocaml/idl/api_version.mli | 17 +++++++++++++++++ ocaml/idl/datamodel_common.ml | 4 ++-- ocaml/idl/dune | 2 +- 6 files changed, 76 insertions(+), 4 deletions(-) create mode 100644 ocaml/idl/api_version.ml create mode 100644 ocaml/idl/api_version.ml.in create mode 100644 ocaml/idl/api_version.mli diff --git a/configure.ml b/configure.ml index e5c37d55fbc..91eee0f0ed7 100644 --- a/configure.ml +++ b/configure.ml @@ -57,6 +57,8 @@ let args = ; flag "yumplugindir" ~doc:"DIR YUM plugins" ~default:"/usr/lib/yum-plugins" ; flag "yumpluginconfdir" ~doc:"DIR YUM plugins conf dir" ~default:"/etc/yum/pluginconf.d" + ; flag "xapi_api_version_major" ~doc:"xapi api major version" ~default:"2" + ; flag "xapi_api_version_minor" ~doc:"xapi api minor version" ~default:"21" ] |> Arg.align @@ -84,7 +86,7 @@ let () = in List.iter print_endline lines ; (* Expand @LIBEXEC@ in udev rules *) - match Hashtbl.find_opt config "XENOPSD_LIBEXECDIR" with + ( match Hashtbl.find_opt config "XENOPSD_LIBEXECDIR" with | Some xenopsd_libexecdir -> expand "@LIBEXEC@" xenopsd_libexecdir "ocaml/xenopsd/scripts/vif.in" "ocaml/xenopsd/scripts/vif" ; @@ -93,3 +95,17 @@ let () = "ocaml/xenopsd/scripts/xen-backend.rules" | None -> failwith "xenopsd_libexecdir not set" + ) ; + + match + ( Hashtbl.find_opt config "XAPI_API_VERSION_MAJOR" + , Hashtbl.find_opt config "XAPI_API_VERSION_MINOR" + ) + with + | Some xapi_api_version_major, Some xapi_api_version_minor -> + expand "@APIVERMAJ@" xapi_api_version_major 
"ocaml/idl/api_version.ml.in" + "ocaml/idl/api_version.ml.in2" ; + expand "@APIVERMIN@" xapi_api_version_minor "ocaml/idl/api_version.ml.in2" + "ocaml/idl/api_version.ml" + | _, _ -> + failwith "xapi_api_version_major or xapi_api_version_minor not set" diff --git a/ocaml/idl/api_version.ml b/ocaml/idl/api_version.ml new file mode 100644 index 00000000000..297be24bc25 --- /dev/null +++ b/ocaml/idl/api_version.ml @@ -0,0 +1,22 @@ +(* + * Copyright (c) Cloud Software Group, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +(* This file is only needed for building xapi with local make, now the + api_version_major and api_version_minor are defined in xapi.spec and this + file will be regenerated from api_version.ml.in by configure.ml during koji + build. *) + +let api_version_major = 2L + +let api_version_minor = 21L diff --git a/ocaml/idl/api_version.ml.in b/ocaml/idl/api_version.ml.in new file mode 100644 index 00000000000..984d207c7f6 --- /dev/null +++ b/ocaml/idl/api_version.ml.in @@ -0,0 +1,17 @@ +(* + * Copyright (c) Cloud Software Group, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +let api_version_major = @APIVERMAJ@L + +let api_version_minor = @APIVERMIN@L diff --git a/ocaml/idl/api_version.mli b/ocaml/idl/api_version.mli new file mode 100644 index 00000000000..ed946e02e93 --- /dev/null +++ b/ocaml/idl/api_version.mli @@ -0,0 +1,17 @@ +(* + * Copyright (c) Cloud Software Group, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +val api_version_major : int64 + +val api_version_minor : int64 diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index ec7e2d7fdb2..e66ab3eff93 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -153,9 +153,9 @@ let tech_preview_releases = (* api version *) (* Normally xencenter_min_verstring and xencenter_max_verstring in the xapi_globs should be set to the same value, * but there are exceptions: please consult the XenCenter maintainers if in doubt. 
*) -let api_version_major = 2L +let api_version_major = Api_version.api_version_major -let api_version_minor = 21L +let api_version_minor = Api_version.api_version_minor let api_version_string = Printf.sprintf "%Ld.%Ld" api_version_major api_version_minor diff --git a/ocaml/idl/dune b/ocaml/idl/dune index 430938311f8..31513e0cfca 100644 --- a/ocaml/idl/dune +++ b/ocaml/idl/dune @@ -6,7 +6,7 @@ datamodel_pool datamodel_cluster datamodel_cluster_host dm_api escaping datamodel_values datamodel_schema datamodel_certificate datamodel_diagnostics datamodel_repository datamodel_lifecycle - datamodel_vtpm datamodel_observer datamodel_vm_group) + datamodel_vtpm datamodel_observer datamodel_vm_group api_version) (libraries rpclib.core sexplib0 From 397de7ba7ced457ca00dab2f09a63d02fc37d713 Mon Sep 17 00:00:00 2001 From: Gang Ji Date: Thu, 4 Jul 2024 16:06:52 +0800 Subject: [PATCH 284/341] CP-46933: Expose XAPI API version in the output of HTTP API /updates XAPI API version is exposed from updateinfo.xml, to expose it in the output of HTTP API /updates, a new json field "xapi-api-version" is added in the json output only when XAPI API version is exposed from updateinfo.xml successfully, otherwise the returned json data keeps the original format. 
Signed-off-by: Gang Ji --- ocaml/tests/test_updateinfo.ml | 1277 ++++++++++++++++++-------------- ocaml/xapi/repository.ml | 23 +- ocaml/xapi/updateinfo.ml | 192 ++--- ocaml/xapi/updateinfo.mli | 10 +- 4 files changed, 851 insertions(+), 651 deletions(-) diff --git a/ocaml/tests/test_updateinfo.ml b/ocaml/tests/test_updateinfo.ml index 2adb7c9d2db..6df2f0b2fba 100644 --- a/ocaml/tests/test_updateinfo.ml +++ b/ocaml/tests/test_updateinfo.ml @@ -442,7 +442,11 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct module Io = struct type input_t = string - type output_t = ((string * UpdateInfo.t) list, exn) result + type output_t = + ( UpdateInfo.api_ver_t option * (UpdateInfo.id_t * UpdateInfo.t) list + , exn + ) + result let string_of_input_t s = s @@ -451,7 +455,10 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct str "%a" Dump.( result - ~ok:(list (pair string (record @@ fields_of_updateinfo))) + ~ok: + (pair (option string) + (list (pair string (record @@ fields_of_updateinfo))) + ) ~error:exn ) ) @@ -472,13 +479,22 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct , Error Api_errors.(Server_error (invalid_updateinfo_xml, [])) ) ; (* No update in updateinfo.xml *) - ({| + ( {| - |}, Ok []) + |} + , Ok (None, []) + ) + ; (* No update in updateinfo.xml, but with xapi-api-version *) + ( {| + + + |} + , Error Api_errors.(Server_error (invalid_updateinfo_xml, [])) + ) ; (* Missing update_type *) ( {| - + UPDATE-0000 title @@ -494,7 +510,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ) ; (* Missing id *) ( {| - + title summary @@ -509,7 +525,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ) ; (* Missing summary *) ( {| - + UPDATE-0000 title @@ -524,7 +540,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ) ; (* Missing description *) ( {| - + UPDATE-0000 title @@ -537,35 +553,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Duplicate update ID *) ( {| - + UPDATE-0000 title @@ -588,7 +606,99 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Error Api_errors.(Server_error (invalid_updateinfo_xml, [])) ) + ; (* Single update, without xapi-api-version *) + ( {| + + + UPDATE-0000 + title + summary + description + special information + https://update.details.info + + + High + + + |} + , Ok + ( None + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) + ) ; (* Single update *) + ( {| + + + UPDATE-0000 + title + summary + description + special information + https://update.details.info + + + High + + + |} + , Ok + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) + ) + ; (* Two updates, without xapi-api-version *) ( {| @@ -602,39 +712,76 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct High + + UPDATE-0001 + title + summary + description + special information + https://update.details.info + + + None + |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( None + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ; ( "UPDATE-0001" + , UpdateInfo. + { + id= "UPDATE-0001" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:50Z" + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Two updates *) ( {| - + UPDATE-0000 title @@ -660,60 +807,62 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ; ( "UPDATE-0001" - , UpdateInfo. 
- { - id= "UPDATE-0001" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:50Z" - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ; ( "UPDATE-0001" + , UpdateInfo. + { + id= "UPDATE-0001" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:50Z" + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Single update with deprecated guidances only *) ( {| - + UPDATE-0000 title @@ -747,56 +896,58 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= - [ - Applicability. 
- { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Gte - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ; Applicability. - { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Lt - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= + [ + Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Gte + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ; Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Lt + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with unknown guidance *) ( {| - + UPDATE-0000 title @@ -842,56 +993,58 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Recommended, [RebootHost]) - ; (Full, [RebootHost; RestartVM]) - ; (Mandatory, [RebootHost]) - ; (Livepatch, []) - ] - ; guidance_applicabilities= - [ - Applicability. - { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Gte - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ; Applicability. 
- { - name= "xsconsole" - ; arch= "x86_64" - ; inequality= Some Lt - ; epoch= None - ; version= "10.1.0" - ; release= "25" - } - ] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Recommended, [RebootHost]) + ; (Full, [RebootHost; RestartVM]) + ; (Mandatory, [RebootHost]) + ; (Livepatch, []) + ] + ; guidance_applicabilities= + [ + Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Gte + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ; Applicability. + { + name= "xsconsole" + ; arch= "x86_64" + ; inequality= Some Lt + ; epoch= None + ; version= "10.1.0" + ; release= "25" + } + ] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with livepatches and livepatch guidance *) ( {| - + UPDATE-0000 title @@ -916,58 +1069,60 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartToolstack]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= - [ - LivePatch. 
- { - component= Kernel - ; base_build_id= - "8346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.19.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ; LivePatch. - { - component= Kernel - ; base_build_id= - "9346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.20.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartToolstack]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= + [ + LivePatch. + { + component= Kernel + ; base_build_id= + "8346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.19.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ; LivePatch. + { + component= Kernel + ; base_build_id= + "9346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.20.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with livepatches and unknown livepatch guidance *) ( {| - + UPDATE-0000 title @@ -992,58 +1147,60 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RebootHost]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= - [ - LivePatch. - { - component= Kernel - ; base_build_id= - "8346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.19.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ; LivePatch. - { - component= Kernel - ; base_build_id= - "9346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.20.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ] - ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" - ; severity= Severity.High - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RebootHost]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= + [ + LivePatch. + { + component= Kernel + ; base_build_id= + "8346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.19.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ; LivePatch. 
+ { + component= Kernel + ; base_build_id= + "9346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.20.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ] + ; issued= + Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + ; severity= Severity.High + ; title= "title" + } + ) + ] + ) ) ; (* Single update with livepatch guidance but empty livepatch *) ( {| - + UPDATE-0000 title @@ -1064,35 +1221,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartDeviceModel]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartDeviceModel]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Single update with valid livepatches *) ( {| - + UPDATE-0000 title @@ -1115,47 +1274,49 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartToolstack]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= - [ - LivePatch. - { - component= Kernel - ; base_build_id= - "9346194f2e98a228f5a595b13ecabd43a99fada0" - ; base_version= "4.19.19" - ; base_release= "8.0.20.xs8" - ; to_version= "4.19.19" - ; to_release= "8.0.21.xs8" - } - ] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartToolstack]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= + [ + LivePatch. + { + component= Kernel + ; base_build_id= + "9346194f2e98a228f5a595b13ecabd43a99fada0" + ; base_version= "4.19.19" + ; base_release= "8.0.20.xs8" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + ] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* Single update with invalid livepatches *) ( {| - + UPDATE-0000 title @@ -1178,35 +1339,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "description" - ; guidance= - [ - (Livepatch, [RestartToolstack]) - ; (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "description" + ; guidance= + [ + (Livepatch, [RestartToolstack]) + ; (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: empty guidance *) ( {| - + UPDATE-0000 title @@ -1227,35 +1390,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "empty guidance" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "empty guidance" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format only: empty guidance *) ( {| - + UPDATE-0000 title @@ -1273,35 +1438,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "guidance in new format only: empty guidance" - ; guidance= - [ - (Mandatory, []) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "guidance in new format only: empty guidance" + ; guidance= + [ + (Mandatory, []) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: empty mandatory and full *) ( {| - + UPDATE-0000 title @@ -1326,35 +1493,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. 
- { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "empty mandatory and full" - ; guidance= - [ - (Full, []) - ; (Mandatory, []) - ; (Recommended, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. + { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "empty mandatory and full" + ; guidance= + [ + (Full, []) + ; (Mandatory, []) + ; (Recommended, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: mandatory only *) ( {| - + UPDATE-0000 title @@ -1381,37 +1550,39 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "mandatory only" - ; guidance= - [ - ( Mandatory - , [RestartDeviceModel; EvacuateHost; RestartToolstack] - ) - ; (Recommended, []) - ; (Full, []) - ; (Livepatch, []) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "mandatory only" + ; guidance= + [ + ( Mandatory + , [RestartDeviceModel; EvacuateHost; RestartToolstack] + ) + ; (Recommended, []) + ; (Full, []) + ; (Livepatch, []) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: mandatory, recommended, full and livepatch *) ( {| - + UPDATE-0000 title @@ -1445,35 +1616,37 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "mandatory, recommended, full and livepatch" - ; guidance= - [ - (Full, [RebootHost]) - ; (Livepatch, [RestartDeviceModel]) - ; (Recommended, [EvacuateHost]) - ; (Mandatory, [RestartToolstack]) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "mandatory, recommended, full and livepatch" + ; guidance= + [ + (Full, [RebootHost]) + ; (Livepatch, [RestartDeviceModel]) + ; (Recommended, [EvacuateHost]) + ; (Mandatory, [RestartToolstack]) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ; (* guidance in new format: mandatory, recommended, full and livepatch *) ( {| - + UPDATE-0000 title @@ -1507,31 +1680,33 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct |} , Ok - [ - ( "UPDATE-0000" - , UpdateInfo. - { - id= "UPDATE-0000" - ; summary= "summary" - ; description= "RestartVM in mandatory" - ; guidance= - [ - (Full, [RebootHost]) - ; (Livepatch, [RestartDeviceModel]) - ; (Recommended, [EvacuateHost]) - ; (Mandatory, [RestartVM]) - ] - ; guidance_applicabilities= [] - ; spec_info= "special information" - ; url= "https://update.details.info" - ; update_type= "security" - ; livepatches= [] - ; issued= Xapi_stdext_date.Date.epoch - ; severity= Severity.None - ; title= "title" - } - ) - ] + ( Some "2.23" + , [ + ( "UPDATE-0000" + , UpdateInfo. 
+ { + id= "UPDATE-0000" + ; summary= "summary" + ; description= "RestartVM in mandatory" + ; guidance= + [ + (Full, [RebootHost]) + ; (Livepatch, [RestartDeviceModel]) + ; (Recommended, [EvacuateHost]) + ; (Mandatory, [RestartVM]) + ] + ; guidance_applicabilities= [] + ; spec_info= "special information" + ; url= "https://update.details.info" + ; update_type= "security" + ; livepatches= [] + ; issued= Xapi_stdext_date.Date.epoch + ; severity= Severity.None + ; title= "title" + } + ) + ] + ) ) ] end) diff --git a/ocaml/xapi/repository.ml b/ocaml/xapi/repository.ml index 95007999782..d798246d0b0 100644 --- a/ocaml/xapi/repository.ml +++ b/ocaml/xapi/repository.ml @@ -298,7 +298,7 @@ let get_applied_livepatches_of_host updates_of_host = let is_livepatchable ~__context repository applied_livepatches_of_host = let updates_info = - parse_updateinfo ~__context ~self:repository ~check:false + parse_updateinfo ~__context ~self:repository ~check:false |> snd in List.exists (fun lp -> @@ -562,7 +562,7 @@ let get_pool_updates_in_json ~__context ~hosts = set_available_updates ~__context |> ignore ; let repository_name = get_repository_name ~__context ~self:repository in - let updates_info = + let api_ver, updates_info = parse_updateinfo ~__context ~self:repository ~check:true in let updates_of_hosts, ids_of_updates = @@ -579,12 +579,17 @@ let get_pool_updates_in_json ~__context ~hosts = |> List.map (fun upd_id -> List.assoc upd_id updates_info) |> List.map (prune_updateinfo_for_livepatches lps) in - `Assoc - [ - ("hosts", `List (List.map HostUpdates.to_json updates_of_hosts)) - ; ("updates", `List (List.map UpdateInfo.to_json updateinfo_list)) - ; ("hash", `String (Db.Repository.get_hash ~__context ~self:repository)) - ] + let f x = + Option.fold ~none:x + ~some:(fun api_ver -> ("xapi-api-version", `String api_ver) :: x) + api_ver + in + [ + ("hosts", `List (List.map HostUpdates.to_json updates_of_hosts)) + ; ("updates", `List (List.map UpdateInfo.to_json updateinfo_list)) + ; 
("hash", `String (Db.Repository.get_hash ~__context ~self:repository)) + ] + |> fun x -> `Assoc (f x) with | Api_errors.(Server_error (code, _)) as e when code <> Api_errors.internal_error -> @@ -787,7 +792,7 @@ let apply_updates ~__context ~host ~hash = raise Api_errors.(Server_error (updateinfo_hash_mismatch, [])) ; with_pool_repositories (fun () -> let updates_info = - parse_updateinfo ~__context ~self:repository ~check:true + parse_updateinfo ~__context ~self:repository ~check:true |> snd in let updates_of_hosts = if Helpers.is_pool_master ~__context ~host then ( diff --git a/ocaml/xapi/updateinfo.ml b/ocaml/xapi/updateinfo.ml index 092af683232..375604cfe78 100644 --- a/ocaml/xapi/updateinfo.ml +++ b/ocaml/xapi/updateinfo.ml @@ -531,8 +531,10 @@ end module UpdateInfo = struct (** The [guidance] deprecates [rec_guidance], [abs_guidance] and [livepatch_guidance] *) + type id_t = string + type t = { - id: string + id: id_t ; summary: string ; description: string ; guidance: GuidanceInUpdateInfo.t @@ -546,6 +548,8 @@ module UpdateInfo = struct ; title: string } + type api_ver_t = string + let guidance_to_string o = Option.value (Option.map Guidance.to_string o) ~default:"" @@ -605,94 +609,106 @@ module UpdateInfo = struct Option.value (List.assoc_opt kind updateinfo.guidance) ~default:[] let of_xml = function - | Xml.Element ("updates", _, children) -> - List.filter_map - (fun n -> - match n with - | Xml.Element ("update", attr, update_nodes) -> - let ty = - match List.assoc_opt "type" attr with - | Some ty -> - ty - | None -> - "" - in - let ui = - List.fold_left - (fun acc node -> - match node with - | Xml.Element ("id", _, [Xml.PCData v]) -> - {acc with id= v} - | Xml.Element ("url", _, [Xml.PCData v]) -> - {acc with url= v} - | Xml.Element ("special_info", _, [Xml.PCData v]) -> - {acc with spec_info= v} - | Xml.Element ("summary", _, [Xml.PCData v]) -> - {acc with summary= v} - | Xml.Element ("description", _, [Xml.PCData v]) -> - {acc with description= v} - | 
Xml.Element ("guidance", _, guidance_blocks) -> - { - acc with - guidance= - GuidanceInUpdateInfo.of_xml guidance_blocks - } - | Xml.Element ("guidance_applicabilities", _, apps) -> - { - acc with - guidance_applicabilities= - List.filter_map Applicability.of_xml apps - } - | Xml.Element ("livepatches", _, livepatches) -> - {acc with livepatches= LivePatch.of_xml livepatches} - | Xml.Element ("issued", attr, _) -> - let issued = - match List.assoc_opt "date" attr with - | Some date -> ( - try - Xapi_stdext_date.Date.of_string - (Scanf.sscanf date - "%04d-%02d-%02d %02d:%02d:%02d" - (fun y mon d h m s -> - Printf.sprintf - "%04i%02i%02iT%02i:%02i:%02iZ" y mon d - h m s - ) - ) - with e -> - (* The error should not block update. Ingore it - and set "issued" as epoch. *) - warn "%s" (ExnHelper.string_of_exn e) ; - Xapi_stdext_date.Date.epoch - ) - | None -> - Xapi_stdext_date.Date.epoch - in - {acc with issued} - | Xml.Element ("severity", _, [Xml.PCData v]) -> ( - try {acc with severity= Severity.of_string v} - with e -> - (* The error should not block update. Ingore it. 
*) - warn "%s" (ExnHelper.string_of_exn e) ; - acc + | Xml.Element ("updates", attrs, children) -> ( + let api_ver = List.assoc_opt "xapi-api-version" attrs in + let uis = + List.filter_map + (fun n -> + match n with + | Xml.Element ("update", attrs, update_nodes) -> + let ty = + match List.assoc_opt "type" attrs with + | Some ty -> + ty + | None -> + "" + in + let ui = + List.fold_left + (fun acc node -> + match node with + | Xml.Element ("id", _, [Xml.PCData v]) -> + {acc with id= v} + | Xml.Element ("url", _, [Xml.PCData v]) -> + {acc with url= v} + | Xml.Element ("special_info", _, [Xml.PCData v]) -> + {acc with spec_info= v} + | Xml.Element ("summary", _, [Xml.PCData v]) -> + {acc with summary= v} + | Xml.Element ("description", _, [Xml.PCData v]) -> + {acc with description= v} + | Xml.Element ("guidance", _, guidance_blocks) -> + { + acc with + guidance= + GuidanceInUpdateInfo.of_xml guidance_blocks + } + | Xml.Element ("guidance_applicabilities", _, apps) -> + { + acc with + guidance_applicabilities= + List.filter_map Applicability.of_xml apps + } + | Xml.Element ("livepatches", _, livepatches) -> + {acc with livepatches= LivePatch.of_xml livepatches} + | Xml.Element ("issued", attrs, _) -> + let issued = + match List.assoc_opt "date" attrs with + | Some date -> ( + try + Xapi_stdext_date.Date.of_string + (Scanf.sscanf date + "%04d-%02d-%02d %02d:%02d:%02d" + (fun y mon d h m s -> + Printf.sprintf + "%04i%02i%02iT%02i:%02i:%02iZ" y mon + d h m s + ) + ) + with e -> + (* The error should not block update. Ingore it + and set "issued" as epoch. *) + warn "%s" (ExnHelper.string_of_exn e) ; + Xapi_stdext_date.Date.epoch + ) + | None -> + Xapi_stdext_date.Date.epoch + in + {acc with issued} + | Xml.Element ("severity", _, [Xml.PCData v]) -> ( + try {acc with severity= Severity.of_string v} + with e -> + (* The error should not block update. Ingore it. 
*) + warn "%s" (ExnHelper.string_of_exn e) ; + acc + ) + | Xml.Element ("title", _, [Xml.PCData v]) -> + {acc with title= v} + | _ -> + acc ) - | Xml.Element ("title", _, [Xml.PCData v]) -> - {acc with title= v} - | _ -> - acc - ) - {default with update_type= ty} - update_nodes - |> assert_valid_updateinfo - in - debug "updateinfo: %s" (to_string ui) ; - Some ui - | _ -> - None - ) - children - |> assert_no_dup_update_id - |> List.map (fun updateinfo -> (updateinfo.id, updateinfo)) + {default with update_type= ty} + update_nodes + |> assert_valid_updateinfo + in + debug "updateinfo: %s" (to_string ui) ; + Some ui + | _ -> + None + ) + children + |> assert_no_dup_update_id + |> List.map (fun updateinfo -> (updateinfo.id, updateinfo)) + in + match (api_ver, uis) with + | Some v, [] -> + error + "Unexpected xapi-api-version: %s when there is no updates at all" + v ; + raise Api_errors.(Server_error (invalid_updateinfo_xml, [])) + | _, _ -> + (api_ver, uis) + ) | _ -> error "Failed to parse updateinfo.xml: missing " ; raise Api_errors.(Server_error (invalid_updateinfo_xml, [])) diff --git a/ocaml/xapi/updateinfo.mli b/ocaml/xapi/updateinfo.mli index 7a348db598c..8948d778d23 100644 --- a/ocaml/xapi/updateinfo.mli +++ b/ocaml/xapi/updateinfo.mli @@ -143,8 +143,10 @@ end (** The metadata of one update in updateinfo. 
*) module UpdateInfo : sig + type id_t = string + type t = { - id: string + id: id_t ; summary: string ; description: string ; guidance: GuidanceInUpdateInfo.t @@ -158,13 +160,15 @@ module UpdateInfo : sig ; title: string } + type api_ver_t = string + val to_json : t -> Yojson.Basic.t val guidance_to_string : Guidance.t option -> string - val of_xml : Xml.xml -> (string * t) list + val of_xml : Xml.xml -> api_ver_t option * (id_t * t) list - val of_xml_file : string -> (string * t) list + val of_xml_file : string -> api_ver_t option * (id_t * t) list val get_guidances_of_kind : kind:Guidance.kind -> t -> Guidance.t list end From cf27ff9f0915f2363c0bdc39f34cc5b211d3f8f6 Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Wed, 17 Jul 2024 10:56:57 +0100 Subject: [PATCH 285/341] Expand Go deserialization support for xen-api dates Signed-off-by: Danilo Del Busso --- .../sdk-gen/go/templates/ConvertTime.mustache | 29 ++++++++++++++++++- ocaml/sdk-gen/go/test_data/time_convert.go | 29 ++++++++++++++++++- 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/ocaml/sdk-gen/go/templates/ConvertTime.mustache b/ocaml/sdk-gen/go/templates/ConvertTime.mustache index d1f18643057..d6f0e2a63d5 100644 --- a/ocaml/sdk-gen/go/templates/ConvertTime.mustache +++ b/ocaml/sdk-gen/go/templates/ConvertTime.mustache @@ -1,5 +1,32 @@ {{#serialize}} -var timeFormats = []string{time.RFC3339, "20060102T15:04:05Z", "20060102T15:04:05"} +var timeFormats = []string{ + time.RFC3339, + "2006-01-02T15:04:05", + + // no dashes, no colons + "20060102T15:04:05Z", + "20060102T15:04:05", + "20060102T150405.999999999Z0700", + "20060102T150405", + "20060102T150405Z07", + "20060102T150405Z07:00", + + // no dashes, with colons + "20060102T15:04:05Z07", + "20060102T15:04:05Z0700", + "20060102T15:04:05Z07:00", + "20060102T15:04:05.999999999Z07", + "20060102T15:04:05.999999999Z07:00", + "20060102T15:04:05.999999999Z07", + + // dashes and colon patterns not covered by `time.RFC3339` + 
"2006-01-02T15:04:05Z07", + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05.999999999Z07", + "2006-01-02T15:04:05.999999999Z07:00", + "2006-01-02T15:04:05.999999999Z07", +} //nolint:unparam func serialize{{func_name_suffix}}(context string, value {{type}}) (string, error) { diff --git a/ocaml/sdk-gen/go/test_data/time_convert.go b/ocaml/sdk-gen/go/test_data/time_convert.go index 7bbdf602ced..d9d5483d5b3 100644 --- a/ocaml/sdk-gen/go/test_data/time_convert.go +++ b/ocaml/sdk-gen/go/test_data/time_convert.go @@ -1,4 +1,31 @@ -var timeFormats = []string{time.RFC3339, "20060102T15:04:05Z", "20060102T15:04:05"} +var timeFormats = []string{ + time.RFC3339, + "2006-01-02T15:04:05", + + // no dashes, no colons + "20060102T15:04:05Z", + "20060102T15:04:05", + "20060102T150405.999999999Z0700", + "20060102T150405", + "20060102T150405Z07", + "20060102T150405Z07:00", + + // no dashes, with colons + "20060102T15:04:05Z07", + "20060102T15:04:05Z0700", + "20060102T15:04:05Z07:00", + "20060102T15:04:05.999999999Z07", + "20060102T15:04:05.999999999Z07:00", + "20060102T15:04:05.999999999Z07", + + // dashes and colon patterns not covered by `time.RFC3339` + "2006-01-02T15:04:05Z07", + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05.999999999Z07", + "2006-01-02T15:04:05.999999999Z07:00", + "2006-01-02T15:04:05.999999999Z07", +} //nolint:unparam func serializeTime(context string, value time.Time) (string, error) { From 8f4c11278be697f52381a25f55880391cd7718a6 Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Wed, 17 Jul 2024 10:57:02 +0100 Subject: [PATCH 286/341] Expand C# deserialization support for xen-api dates Signed-off-by: Danilo Del Busso --- .../sdk-gen/csharp/autogen/src/Converters.cs | 56 ++++++++++++++++--- 1 file changed, 47 insertions(+), 9 deletions(-) diff --git a/ocaml/sdk-gen/csharp/autogen/src/Converters.cs b/ocaml/sdk-gen/csharp/autogen/src/Converters.cs index 2c4e4ba0df7..32b02d987a6 100644 --- 
a/ocaml/sdk-gen/csharp/autogen/src/Converters.cs +++ b/ocaml/sdk-gen/csharp/autogen/src/Converters.cs @@ -385,16 +385,54 @@ public override object ReadJson(JsonReader reader, Type objectType, object exist internal class XenDateTimeConverter : IsoDateTimeConverter { - private static readonly string[] DateFormatsUniversal = - { - "yyyyMMddTHH:mm:ssZ", "yyyy-MM-ddThh:mm:ssZ" + string [] DateFormatsUtc = { + // dashes and colons + "yyyy-MM-ddTHH:mm:ssZ", + "yyyy-MM-ddTHH:mm:ss.fffZ", + + // no dashes, with colons + "yyyyMMddTHH:mm:ssZ", + "yyyyMMddTHH:mm:ss.fffZ", + + // no dashes + "yyyyMMddTHHmmssZ", + "yyyyMMddTHHmmss.fffZ", }; - private static readonly string[] DateFormatsOther = + string[] DateFormatsLocal = { - "yyyyMMddTHH:mm:ss", + // no dashes + "yyyyMMddTHHmmss.fffzzzz", + "yyyyMMddTHHmmss.fffzzz", + "yyyyMMddTHHmmss.fffzz", + "yyyyMMddTHHmmss.fff", + + "yyyyMMddTHHmmsszzzz", "yyyyMMddTHHmmsszzz", - "yyyyMMddTHHmmsszz" + "yyyyMMddTHHmmsszz", + "yyyyMMddTHHmmss", + + // no dashes, with colons + "yyyyMMddTHH:mm:ss.fffzzzz", + "yyyyMMddTHH:mm:ss.fffzzz", + "yyyyMMddTHH:mm:ss.fffzz", + "yyyyMMddTHH:mm:ss.fff", + + "yyyyMMddTHH:mm:sszzzz", + "yyyyMMddTHH:mm:sszzz", + "yyyyMMddTHH:mm:sszz", + "yyyyMMddTHH:mm:ss", + + // dashes and colons + "yyyy-MM-ddTHH:mm:ss.fffzzzz", + "yyyy-MM-ddTHH:mm:ss.fffzzz", + "yyyy-MM-ddTHH:mm:ss.fffzz", + "yyyy-MM-ddTHH:mm:ss.fff", + + "yyyy-MM-ddTHH:mm:sszzzz", + "yyyy-MM-ddTHH:mm:sszzz", + "yyyy-MM-ddTHH:mm:sszz", + "yyyy-MM-ddTHH:mm:ss", }; public override object ReadJson(JsonReader reader, Type objectType, object existingValue, JsonSerializer serializer) @@ -403,11 +441,11 @@ public override object ReadJson(JsonReader reader, Type objectType, object exist DateTime result; - if (DateTime.TryParseExact(str, DateFormatsUniversal, CultureInfo.InvariantCulture, + if (DateTime.TryParseExact(str, DateFormatsUtc, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal | DateTimeStyles.AdjustToUniversal, out result)) return result; - if 
(DateTime.TryParseExact(str, DateFormatsOther, CultureInfo.InvariantCulture, + if (DateTime.TryParseExact(str, DateFormatsLocal, CultureInfo.InvariantCulture, DateTimeStyles.None, out result)) return result; @@ -420,7 +458,7 @@ public override void WriteJson(JsonWriter writer, object value, JsonSerializer s { var dateTime = (DateTime)value; dateTime = dateTime.ToUniversalTime(); - var text = dateTime.ToString(DateFormatsUniversal[0], CultureInfo.InvariantCulture); + var text = dateTime.ToString(DateFormatsUtc[0], CultureInfo.InvariantCulture); writer.WriteValue(text); return; } From dcfc0e47a877495e819b0dbc41b7e96a70a7fc83 Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Wed, 17 Jul 2024 10:57:13 +0100 Subject: [PATCH 287/341] Expand C deserialization support for xen-api dates Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/c/autogen/src/xen_common.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/ocaml/sdk-gen/c/autogen/src/xen_common.c b/ocaml/sdk-gen/c/autogen/src/xen_common.c index 9178d3fd43f..0cb089d35d9 100644 --- a/ocaml/sdk-gen/c/autogen/src/xen_common.c +++ b/ocaml/sdk-gen/c/autogen/src/xen_common.c @@ -950,7 +950,26 @@ static void parse_into(xen_session *s, xmlNode *value_node, { struct tm tm; memset(&tm, 0, sizeof(tm)); - strptime((char *)string, "%Y%m%dT%H:%M:%S", &tm); + // We only support basic ISO8601 since the C SDK only + // connects to the XML-RPC backend + char *formats[] = { + // no dashes, no colons + "%Y%m%dT%H%M%S", + // no dashes, with colons + "%Y%m%dT%H:%M:%S", + // dashes and colons + "%Y-%m-%dT%H:%M:%S", + }; + int num_formats = sizeof(formats) / sizeof(formats[0]); + + for (int i = 0; i < num_formats; i++) + { + if (strptime((char *)string, formats[i], &tm) != NULL) + { + break; + } + } + ((time_t *)value)[slot] = (time_t)mktime(&tm); free(string); } From 820fb80be19247258f40de150bf898d2cfb3036c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 
15:46:19 +0100 Subject: [PATCH 288/341] [maintenance]: mark data only dirs as such MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tell dune which directories don't contain dune files, so it can avoid scanning them. They can still be used as dependencies though. Unit test input data is particularly large for example. Signed-off-by: Edwin Török --- dune | 4 ++++ ocaml/doc/dune | 1 + ocaml/dune | 1 + ocaml/gencert/dune | 2 ++ ocaml/idl/dune | 2 ++ ocaml/idl/ocaml_backend/dune | 1 + ocaml/libs/xapi-rrd/lib_test/dune | 2 ++ ocaml/message-switch/dune | 2 ++ ocaml/networkd/test/dune | 2 ++ ocaml/rrd2csv/dune | 1 + ocaml/sdk-gen/c/autogen/dune | 1 + ocaml/sdk-gen/c/dune | 1 + ocaml/sdk-gen/csharp/autogen/dune | 4 +++- ocaml/sdk-gen/csharp/dune | 1 + ocaml/sdk-gen/dune | 1 + ocaml/sdk-gen/go/autogen/dune | 2 ++ ocaml/sdk-gen/go/dune | 2 ++ ocaml/sdk-gen/java/autogen/dune | 1 + ocaml/sdk-gen/java/dune | 1 + ocaml/sdk-gen/powershell/autogen/dune | 2 ++ ocaml/sdk-gen/powershell/dune | 1 + ocaml/squeezed/dune | 1 + ocaml/tests/dune | 2 ++ ocaml/xapi-idl/dune | 1 + ocaml/xapi-idl/lib_test/dune | 2 ++ ocaml/xapi-storage-script/dune | 1 + ocaml/xapi-storage/dune | 1 + ocaml/xapi-storage/python/dune | 1 + ocaml/xcp-rrdd/dune | 1 + ocaml/xenopsd/dune | 1 + 30 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 ocaml/dune create mode 100644 ocaml/rrd2csv/dune create mode 100644 ocaml/sdk-gen/dune create mode 100644 ocaml/squeezed/dune create mode 100644 ocaml/xapi-idl/dune create mode 100644 ocaml/xapi-storage/dune create mode 100644 ocaml/xapi-storage/python/dune create mode 100644 ocaml/xcp-rrdd/dune create mode 100644 ocaml/xenopsd/dune diff --git a/dune b/dune index e2b4842adb5..2a094a073a9 100644 --- a/dune +++ b/dune @@ -13,3 +13,7 @@ (executable (name configure) (libraries dune-configurator findlib cmdliner unix)) + +; Can still be used for dependencies, but dune won't scan these dirs +; for dune files +(data_only_dirs 
doc scripts python3 .vscode) diff --git a/ocaml/doc/dune b/ocaml/doc/dune index ee0f921d032..7c3dbcf4f68 100644 --- a/ocaml/doc/dune +++ b/ocaml/doc/dune @@ -35,3 +35,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/dune b/ocaml/dune new file mode 100644 index 00000000000..dbdeef2876a --- /dev/null +++ b/ocaml/dune @@ -0,0 +1 @@ +(data_only_dirs xe) diff --git a/ocaml/gencert/dune b/ocaml/gencert/dune index ef7875abd29..66a78ca4a41 100644 --- a/ocaml/gencert/dune +++ b/ocaml/gencert/dune @@ -67,3 +67,5 @@ ) (action (run %{test} --color=always)) ) + +(data_only_dirs test_data) diff --git a/ocaml/idl/dune b/ocaml/idl/dune index 31513e0cfca..8d337c5c7f2 100644 --- a/ocaml/idl/dune +++ b/ocaml/idl/dune @@ -81,3 +81,5 @@ (alias update-dm-lifecycle) (action (diff datamodel_lifecycle.ml datamodel_lifecycle.ml.generated))) + +(data_only_dirs templates) diff --git a/ocaml/idl/ocaml_backend/dune b/ocaml/idl/ocaml_backend/dune index f6c4173d363..70cc34c90a1 100644 --- a/ocaml/idl/ocaml_backend/dune +++ b/ocaml/idl/ocaml_backend/dune @@ -11,3 +11,4 @@ ) ) +(data_only_dirs python) diff --git a/ocaml/libs/xapi-rrd/lib_test/dune b/ocaml/libs/xapi-rrd/lib_test/dune index 7a66380a63e..1c62f716d46 100644 --- a/ocaml/libs/xapi-rrd/lib_test/dune +++ b/ocaml/libs/xapi-rrd/lib_test/dune @@ -25,3 +25,5 @@ xapi-rrd ) ) + +(data_only_dirs test_data) diff --git a/ocaml/message-switch/dune b/ocaml/message-switch/dune index a0d445776e6..3daaf679370 100644 --- a/ocaml/message-switch/dune +++ b/ocaml/message-switch/dune @@ -1,3 +1,5 @@ (executable (name configure) (libraries dune-configurator findlib)) + +(data_only_dirs www python) diff --git a/ocaml/networkd/test/dune b/ocaml/networkd/test/dune index 951eda074a0..9d7ac2c9248 100644 --- a/ocaml/networkd/test/dune +++ b/ocaml/networkd/test/dune @@ -25,3 +25,5 @@ ) ) ) + +(data_only_dirs jsonrpc_files) diff --git a/ocaml/rrd2csv/dune b/ocaml/rrd2csv/dune new file mode 100644 index 00000000000..97dab88ae44 --- /dev/null 
+++ b/ocaml/rrd2csv/dune @@ -0,0 +1 @@ +(data_only_dirs man) diff --git a/ocaml/sdk-gen/c/autogen/dune b/ocaml/sdk-gen/c/autogen/dune index e7809a95ba5..78b81f38e4c 100644 --- a/ocaml/sdk-gen/c/autogen/dune +++ b/ocaml/sdk-gen/c/autogen/dune @@ -23,3 +23,4 @@ ) ) +(data_only_dirs src include) diff --git a/ocaml/sdk-gen/c/dune b/ocaml/sdk-gen/c/dune index 79cb32b80c6..ca7f44dee18 100644 --- a/ocaml/sdk-gen/c/dune +++ b/ocaml/sdk-gen/c/dune @@ -19,3 +19,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/sdk-gen/csharp/autogen/dune b/ocaml/sdk-gen/csharp/autogen/dune index d5e542936ad..61e1f86a0a4 100644 --- a/ocaml/sdk-gen/csharp/autogen/dune +++ b/ocaml/sdk-gen/csharp/autogen/dune @@ -12,4 +12,6 @@ LICENSE (source_tree .) ) -) \ No newline at end of file +) + +(data_only_dirs src) diff --git a/ocaml/sdk-gen/csharp/dune b/ocaml/sdk-gen/csharp/dune index e7112b1aae9..df6856bfc22 100644 --- a/ocaml/sdk-gen/csharp/dune +++ b/ocaml/sdk-gen/csharp/dune @@ -46,3 +46,4 @@ (action (run %{x} -s %{y})) ) +(data_only_dirs templates) diff --git a/ocaml/sdk-gen/dune b/ocaml/sdk-gen/dune new file mode 100644 index 00000000000..49140147129 --- /dev/null +++ b/ocaml/sdk-gen/dune @@ -0,0 +1 @@ +(data_only_dirs component-test) diff --git a/ocaml/sdk-gen/go/autogen/dune b/ocaml/sdk-gen/go/autogen/dune index c1cb1ddd3b8..98bbd45a418 100644 --- a/ocaml/sdk-gen/go/autogen/dune +++ b/ocaml/sdk-gen/go/autogen/dune @@ -22,3 +22,5 @@ (source_tree .) 
) ) + +(data_only_dirs src) diff --git a/ocaml/sdk-gen/go/dune b/ocaml/sdk-gen/go/dune index 6d99103516a..de55ec5cee8 100644 --- a/ocaml/sdk-gen/go/dune +++ b/ocaml/sdk-gen/go/dune @@ -44,3 +44,5 @@ (source_tree templates) ) ) + +(data_only_dirs test_data templates) diff --git a/ocaml/sdk-gen/java/autogen/dune b/ocaml/sdk-gen/java/autogen/dune index ba31f05eaaf..0d4efe16d03 100644 --- a/ocaml/sdk-gen/java/autogen/dune +++ b/ocaml/sdk-gen/java/autogen/dune @@ -5,3 +5,4 @@ ) ) +(data_only_dirs xen-api) diff --git a/ocaml/sdk-gen/java/dune b/ocaml/sdk-gen/java/dune index 498b3a7bc09..a1daac834b0 100644 --- a/ocaml/sdk-gen/java/dune +++ b/ocaml/sdk-gen/java/dune @@ -29,3 +29,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/sdk-gen/powershell/autogen/dune b/ocaml/sdk-gen/powershell/autogen/dune index 56eb4c8480a..61e1f86a0a4 100644 --- a/ocaml/sdk-gen/powershell/autogen/dune +++ b/ocaml/sdk-gen/powershell/autogen/dune @@ -13,3 +13,5 @@ (source_tree .) ) ) + +(data_only_dirs src) diff --git a/ocaml/sdk-gen/powershell/dune b/ocaml/sdk-gen/powershell/dune index 39b2f99b75f..826885af543 100644 --- a/ocaml/sdk-gen/powershell/dune +++ b/ocaml/sdk-gen/powershell/dune @@ -19,3 +19,4 @@ (action (run %{x})) ) +(data_only_dirs templates) diff --git a/ocaml/squeezed/dune b/ocaml/squeezed/dune new file mode 100644 index 00000000000..389b982cc01 --- /dev/null +++ b/ocaml/squeezed/dune @@ -0,0 +1 @@ +(data_only_dirs scripts) diff --git a/ocaml/tests/dune b/ocaml/tests/dune index 7d351d5e45c..24ab0ef29d9 100644 --- a/ocaml/tests/dune +++ b/ocaml/tests/dune @@ -168,3 +168,5 @@ ) (env (_ (env-vars (XAPI_TEST 1)))) + +(data_only_dirs test_data tests) diff --git a/ocaml/xapi-idl/dune b/ocaml/xapi-idl/dune new file mode 100644 index 00000000000..85c1a3c24e2 --- /dev/null +++ b/ocaml/xapi-idl/dune @@ -0,0 +1 @@ +(data_only_dirs designs xen-api-plugin) diff --git a/ocaml/xapi-idl/lib_test/dune b/ocaml/xapi-idl/lib_test/dune index 1b1e8193ca7..c29a72fe6c6 100644 --- 
a/ocaml/xapi-idl/lib_test/dune +++ b/ocaml/xapi-idl/lib_test/dune @@ -1,3 +1,5 @@ +(data_only_dirs test_data) + (library (name test_lib) (modules idl_test_common) diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index 5fa4f0f28fa..a3b86f166b4 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -72,3 +72,4 @@ (action (bash "export PYTHONPATH=../xapi-storage/python/; echo $PYTHONPATH; ./%{x} --root=$PWD/test --self-test-only=true")) ) +(data_only_dirs test examples) diff --git a/ocaml/xapi-storage/dune b/ocaml/xapi-storage/dune new file mode 100644 index 00000000000..f45f1f79866 --- /dev/null +++ b/ocaml/xapi-storage/dune @@ -0,0 +1 @@ +(data_only_dirs rpc-light) diff --git a/ocaml/xapi-storage/python/dune b/ocaml/xapi-storage/python/dune new file mode 100644 index 00000000000..261d3661603 --- /dev/null +++ b/ocaml/xapi-storage/python/dune @@ -0,0 +1 @@ +(data_only_dirs examples) diff --git a/ocaml/xcp-rrdd/dune b/ocaml/xcp-rrdd/dune new file mode 100644 index 00000000000..db0846cbfe3 --- /dev/null +++ b/ocaml/xcp-rrdd/dune @@ -0,0 +1 @@ +(data_only_dirs scripts bugtool-plugin) diff --git a/ocaml/xenopsd/dune b/ocaml/xenopsd/dune new file mode 100644 index 00000000000..389b982cc01 --- /dev/null +++ b/ocaml/xenopsd/dune @@ -0,0 +1 @@ +(data_only_dirs scripts) From 710687fca00e92611935725a12effdbdc523fe78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 16:37:45 +0100 Subject: [PATCH 289/341] [maintenance] disable preprocessor for modules that do not need them MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a followup for: Fixes: f07b9d7d7a57 ("[maintenance]: preprocess only modules containing @@deriving") Signed-off-by: Edwin Török --- ocaml/quicktest/dune | 2 +- ocaml/tests/dune | 2 +- ocaml/xapi-cli-server/dune | 2 +- ocaml/xapi-idl/gpumon/dune | 2 +- ocaml/xapi-idl/rrd/dune | 3 +-- ocaml/xapi-idl/storage/dune | 10 
++++------ ocaml/xapi-idl/xen/dune | 5 ++--- 7 files changed, 11 insertions(+), 15 deletions(-) diff --git a/ocaml/quicktest/dune b/ocaml/quicktest/dune index 9a8a4a75043..b061ff1176c 100644 --- a/ocaml/quicktest/dune +++ b/ocaml/quicktest/dune @@ -40,7 +40,7 @@ xenctrl xml-light2 ) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv)) + (preprocess (per_module ((pps ppx_deriving_rpc) Quicktest_vm_lifecycle))) ) diff --git a/ocaml/tests/dune b/ocaml/tests/dune index 24ab0ef29d9..fe620e8fc7f 100644 --- a/ocaml/tests/dune +++ b/ocaml/tests/dune @@ -122,7 +122,7 @@ xml-light2 yojson ) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv)) + (preprocess (per_module ((pps ppx_deriving_rpc) Test_cluster_host))) ) (test (name test_storage_smapiv1_wrapper) diff --git a/ocaml/xapi-cli-server/dune b/ocaml/xapi-cli-server/dune index 8d7cacd6ad8..c1a8269dbb6 100644 --- a/ocaml/xapi-cli-server/dune +++ b/ocaml/xapi-cli-server/dune @@ -45,7 +45,7 @@ xmlm xml-light2 ) - (preprocess (pps ppx_deriving_rpc)) + (preprocess (per_module ((pps ppx_deriving_rpc) Cli_operations))) (wrapped false) ) diff --git a/ocaml/xapi-idl/gpumon/dune b/ocaml/xapi-idl/gpumon/dune index ccd184e9098..de10e06dae6 100644 --- a/ocaml/xapi-idl/gpumon/dune +++ b/ocaml/xapi-idl/gpumon/dune @@ -11,7 +11,7 @@ xapi-log ) (wrapped false) - (preprocess (pps ppx_deriving_rpc))) + (preprocess (per_module ((pps ppx_deriving_rpc) Gpumon_interface)))) (executable (name gpumon_cli) diff --git a/ocaml/xapi-idl/rrd/dune b/ocaml/xapi-idl/rrd/dune index 9462c9341e6..8a427a965e3 100644 --- a/ocaml/xapi-idl/rrd/dune +++ b/ocaml/xapi-idl/rrd/dune @@ -39,8 +39,7 @@ (re_export xapi-idl.rrd.interface.types) xapi-rrd ) - (wrapped false) - (preprocess (pps ppx_deriving_rpc))) + (wrapped false)) (executable (name rrd_cli) diff --git a/ocaml/xapi-idl/storage/dune b/ocaml/xapi-idl/storage/dune index 036e8dedd89..1ca965d4368 100644 --- a/ocaml/xapi-idl/storage/dune +++ b/ocaml/xapi-idl/storage/dune @@ -10,7 +10,7 @@ xapi-idl ) (wrapped 
false) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + (preprocess (pps ppx_deriving_rpc))) (library (name xcp_storage_interface) @@ -28,7 +28,7 @@ xapi-log ) (wrapped false) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc ppx_deriving.show))) + (preprocess (pps ppx_deriving_rpc ppx_deriving.show))) (library (name xcp_storage) @@ -43,8 +43,7 @@ xapi-idl.storage.interface xapi-stdext-date ) - (wrapped false) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + (wrapped false)) (test (name storage_test) @@ -58,8 +57,7 @@ xapi-idl xapi-idl.storage xapi-idl.storage.interface - ) - (preprocess (pps ppx_sexp_conv ppx_deriving_rpc))) + )) (test (name suite) diff --git a/ocaml/xapi-idl/xen/dune b/ocaml/xapi-idl/xen/dune index 16ed23ecd22..83266865537 100644 --- a/ocaml/xapi-idl/xen/dune +++ b/ocaml/xapi-idl/xen/dune @@ -30,7 +30,7 @@ ) (flags (:standard -w -27)) (wrapped false) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv))) + (preprocess (pps ppx_deriving_rpc))) (library (name xcp_xen) @@ -44,5 +44,4 @@ xapi-idl xapi-idl.xen.interface ) - (wrapped false) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv))) + (wrapped false)) From beab7a5266ef61eaa5a252abdb41828f3cfb59d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 17:18:34 +0100 Subject: [PATCH 290/341] [maintenance] only copy test_data when running tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 'source_tree test_data' dependency also applied to the test executable. However this is only a runtime dependency, so split the rule and express the more granular dependency. Now 'dune build @check' creates a _build with 9185 files. 
Signed-off-by: Edwin Török --- ocaml/xapi-idl/lib_test/dune | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-idl/lib_test/dune b/ocaml/xapi-idl/lib_test/dune index c29a72fe6c6..689abf9b5eb 100644 --- a/ocaml/xapi-idl/lib_test/dune +++ b/ocaml/xapi-idl/lib_test/dune @@ -8,11 +8,9 @@ (wrapped false) ) -(test +(executable (name guard_interfaces_test) - (package xapi-idl) (modules guard_interfaces_test) - (deps (source_tree test_data)) (libraries test_lib xapi-idl.guard.privileged @@ -20,6 +18,13 @@ ) ) +(rule + (alias runtest) + (package xapi-idl) + (deps (:exe ./guard_interfaces_test.exe) (source_tree test_data/guard)) + (action (run %{exe})) +) + (test (name device_number_test) (package xapi-idl) @@ -36,7 +41,6 @@ (modes exe) (package xapi-idl) (modules (:standard \ idl_test_common guard_interfaces_test device_number_test)) - (deps (source_tree test_data)) (libraries alcotest cohttp_posix @@ -63,3 +67,10 @@ xapi-log ) (preprocess (per_module ((pps ppx_deriving_rpc) Task_server_test Updates_test)))) + +(rule + (alias runtest) + (package xapi-idl) + (deps (:exe ./test.exe) (source_tree test_data/guard)) + (action (run %{exe})) +) From b5ef5a6c02203e643316881e08b96bd7dad89740 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 17:56:20 +0100 Subject: [PATCH 291/341] [maintenance]: reduce run count for test_timer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We'll run the longer version in quicktests, using the QCHECK_LONG_FACTOR. Now the test takes ~3s to run instead of ~20s. 
Signed-off-by: Edwin Török --- ocaml/libs/clock/test_timer.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/libs/clock/test_timer.ml b/ocaml/libs/clock/test_timer.ml index b94a3c470d2..ecfafa8dcbd 100644 --- a/ocaml/libs/clock/test_timer.ml +++ b/ocaml/libs/clock/test_timer.ml @@ -7,7 +7,7 @@ let spans = let test_timer_remaining = let print = Fmt.to_to_string Mtime.Span.pp in - Test.make ~name:"Timer.remaining" ~print spans @@ fun duration -> + Test.make ~count:20 ~name:"Timer.remaining" ~print spans @@ fun duration -> let timer = Timer.start ~duration in let half = Timer.span_to_s duration /. 2. in let elapsed = Mtime_clock.counter () in From ec5e39fb239142f1301bb9dedf72fdd8652032b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 18:07:54 +0100 Subject: [PATCH 292/341] [maintenance]: speed up device_number_test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Alcotest.check prints the message to stdout and flushes the formatter. This is called in a loop a million times, so this generates a lot of output, and is very slow. Use an empty message instead, which has an optimization in alcotest to avoid the formatter flushing, and set ~pos instead so that we get a unique error on failure. The test now takes 1.2s instead of 15s. 
Signed-off-by: Edwin Török --- ocaml/xapi-idl/lib_test/device_number_test.ml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-idl/lib_test/device_number_test.ml b/ocaml/xapi-idl/lib_test/device_number_test.ml index fc8d5b210f1..1e32115cd16 100644 --- a/ocaml/xapi-idl/lib_test/device_number_test.ml +++ b/ocaml/xapi-idl/lib_test/device_number_test.ml @@ -133,10 +133,13 @@ let test_2_way_convert = let original = of_disk_number hvm disk_number |> Option.get in let of_linux = of_linux_device (to_linux_device original) |> Option.get in let of_xenstore = of_xenstore_key (to_xenstore_key original) in - Alcotest.check device_number_equal_linux - "of_linux must be equal to original" original of_linux ; - Alcotest.check device_number "of_xenstore must be equal to original" - original of_xenstore + (* use ~pos instead of msg: a non-empty msg causes the formatter to be flushed, + and messages printed on stdout, which is very slow if we do this in a loop a million times + *) + Alcotest.check' ~pos:__POS__ ~msg:"" device_number_equal_linux + ~expected:original ~actual:of_linux ; + Alcotest.check' ~pos:__POS__ ~msg:"" device_number ~expected:original + ~actual:of_xenstore in let max_d = (1 lsl 20) - 1 in From 9269092ef6800ae80714b43c73c18f87f020d804 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 18:12:46 +0100 Subject: [PATCH 293/341] [maintenance]: reduce iteration count for unixext_test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The proxy and time limited read tests took ~10s, they now take ~2s. A longer test will be run in quicktest with QCHECK_LONG_FACTOR. 
Signed-off-by: Edwin Török --- .../xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml index 7953076844b..d4e2d836675 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml @@ -115,7 +115,8 @@ let test_time_limited_write = let test_time_limited_read = let gen = Gen.tup2 Generate.t Generate.timeouts and print = Print.tup2 Generate.print Print.float in - Test.make ~name:__FUNCTION__ ~print gen @@ fun (behaviour, timeout) -> + Test.make ~count:20 ~name:__FUNCTION__ ~print gen + @@ fun (behaviour, timeout) -> skip_blk behaviour.kind ; skip_dirlnk behaviour.kind ; skip_blk_timed behaviour ; @@ -166,7 +167,7 @@ let test_time_limited_read = let test_proxy = let gen = Generate.t and print = Generate.print in - Test.make ~name:__FUNCTION__ ~print gen @@ fun behaviour -> + Test.make ~count:20 ~name:__FUNCTION__ ~print gen @@ fun behaviour -> if behaviour.kind <> Unix.S_SOCK then QCheck2.assume_fail () ; let test wrapped_fd = From 717a04119522b5dcf8d7056c496d97ae4c770dc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 18:25:03 +0100 Subject: [PATCH 294/341] [maintenance]: speed up vhd tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use ~pos:__POS__ "" instead of __LOC__ in Alcotest.check. This only prints the position on failure, previously __LOC__ was printed on success too. 
Signed-off-by: Edwin Török --- ocaml/libs/vhd/vhd_format_lwt_test/lib.ml | 8 ++++---- ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml | 15 ++++++++------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml b/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml index 3073ba88bca..68803676df3 100644 --- a/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml +++ b/ocaml/libs/vhd/vhd_format_lwt_test/lib.ml @@ -99,7 +99,7 @@ let check_written_sectors t expected = | false -> fail (Failure "read empty sector, expected data") | true -> - Alcotest.check cstruct __LOC__ data y ; + Alcotest.check cstruct ~pos:__POS__ "" data y ; return () ) >>= fun () -> loop xs @@ -139,10 +139,10 @@ let check_raw_stream_contents t expected = let actual = Cstruct.sub data (i * 512) 512 in ( if not (List.mem_assoc sector expected) then - Alcotest.check cstruct __LOC__ empty_sector actual + Alcotest.check cstruct ~pos:__POS__ "" empty_sector actual else let expected = List.assoc sector expected in - Alcotest.check cstruct __LOC__ expected actual + Alcotest.check cstruct ~pos:__POS__ "" expected actual ) ; check (i + 1) in @@ -163,7 +163,7 @@ let check_raw_stream_contents t expected = else let expected = List.assoc offset expected in let actual = Cstruct.sub remaining 0 F.sector_size in - Alcotest.check cstruct __LOC__ expected actual ; + Alcotest.check cstruct ~pos:__POS__ "" expected actual ; loop Int64.(add offset 1L) (Cstruct.shift remaining F.sector_size) in loop offset data diff --git a/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml b/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml index 458c0c7cce6..93ea89365cf 100644 --- a/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml +++ b/ocaml/libs/vhd/vhd_format_lwt_test/parse_test.ml @@ -51,9 +51,9 @@ let check_empty_disk size = let filename = make_new_filename () in Vhd_IO.create_dynamic ~filename ~size () >>= fun vhd -> Vhd_IO.openchain filename false >>= fun vhd' -> - Alcotest.check Lib.header __LOC__ 
vhd.Vhd.header vhd'.Vhd.header ; - Alcotest.check Lib.footer __LOC__ vhd.Vhd.footer vhd'.Vhd.footer ; - Alcotest.check Lib.bat __LOC__ vhd.Vhd.bat vhd'.Vhd.bat ; + Alcotest.check Lib.header ~pos:__POS__ "" vhd.Vhd.header vhd'.Vhd.header ; + Alcotest.check Lib.footer ~pos:__POS__ "" vhd.Vhd.footer vhd'.Vhd.footer ; + Alcotest.check Lib.bat ~pos:__POS__ "" vhd.Vhd.bat vhd'.Vhd.bat ; Vhd_IO.close vhd' >>= fun () -> Vhd_IO.close vhd (* Create a disk, resize it, check headers *) @@ -64,7 +64,8 @@ let check_resize size = let vhd = Vhd.resize vhd newsize in Vhd_IO.close vhd >>= fun () -> Vhd_IO.openchain filename false >>= fun vhd' -> - Alcotest.(check int64) __LOC__ newsize vhd.Vhd.footer.Footer.current_size ; + Alcotest.(check int64 ~pos:__POS__) + "" newsize vhd.Vhd.footer.Footer.current_size ; Vhd_IO.close vhd' (* Create a snapshot, check headers *) @@ -74,9 +75,9 @@ let check_empty_snapshot size = let filename = make_new_filename () in Vhd_IO.create_difference ~filename ~parent:vhd () >>= fun vhd' -> Vhd_IO.openchain filename false >>= fun vhd'' -> - Alcotest.check Lib.header __LOC__ vhd'.Vhd.header vhd''.Vhd.header ; - Alcotest.check Lib.footer __LOC__ vhd'.Vhd.footer vhd''.Vhd.footer ; - Alcotest.check Lib.bat __LOC__ vhd'.Vhd.bat vhd''.Vhd.bat ; + Alcotest.check Lib.header ~pos:__POS__ "" vhd'.Vhd.header vhd''.Vhd.header ; + Alcotest.check Lib.footer ~pos:__POS__ "" vhd'.Vhd.footer vhd''.Vhd.footer ; + Alcotest.check Lib.bat ~pos:__POS__ "" vhd'.Vhd.bat vhd''.Vhd.bat ; Vhd_IO.close vhd'' >>= fun () -> Vhd_IO.close vhd' >>= fun () -> Vhd_IO.close vhd From a1f994aff0092c645a0994f55dfed4037d8c053d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 18:37:23 +0100 Subject: [PATCH 295/341] [maintenance]: reduce sleeps in concur-rpc-test.sh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Port this commit to concur-rpc-test.sh: 62ff5e74065c ("[maintenance]: reduce basic-rpc-test time") Reduces 
time from ~33s to ~10s. Signed-off-by: Edwin Török --- .../core_test/concur-rpc-test.sh | 41 +++++++++---------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/ocaml/message-switch/core_test/concur-rpc-test.sh b/ocaml/message-switch/core_test/concur-rpc-test.sh index a91768972fe..1403946ba5b 100755 --- a/ocaml/message-switch/core_test/concur-rpc-test.sh +++ b/ocaml/message-switch/core_test/concur-rpc-test.sh @@ -9,37 +9,36 @@ trap "cleanup" TERM INT function cleanup { rm -rf "${SWITCHPATH}" } +SECS=${SECS:-0.1} rm -rf "${SWITCHPATH}" && mkdir -p "${SWITCHPATH}" echo Test message switch concurrent processing echo Checking the switch can start late -test -x ./server_unix_main.exe || exit 1 -./server_unix_main.exe -path "$SPATH" & -sleep 1 -test -x ../switch/switch_main.exe && test -x ./client_unix_main.exe || exit 1 -../switch/switch_main.exe --path "$SPATH" --statedir "${SWITCHPATH}" & -./client_unix_main.exe -path "$SPATH" -secs 5 -sleep 2 +./server_unix_main.exe -path "${SPATH}" & +SERVER=$! +sleep "${SECS}" +../switch/switch_main.exe --path "${SPATH}" --statedir "${SWITCHPATH}" & +./client_unix_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" echo Performance test of Lwt to Lwt -test -x lwt/server_main.exe && test -x lwt/client_main.exe || exit 1 -lwt/server_main.exe -path "$SPATH" -concurrent & -lwt/client_main.exe -path "$SPATH" -secs 5 -sleep 2 +lwt/server_main.exe -path "${SPATH}" -concurrent & +SERVER=$! +lwt/client_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" echo Performance test of Async to Lwt -test -x lwt/server_main.exe && test -x async/client_async_main.exe || exit 1 -lwt/server_main.exe -path "$SPATH" -concurrent & -async/client_async_main.exe -path "$SPATH" -secs 5 -sleep 2 +lwt/server_main.exe -path "${SPATH}" -concurrent & +SERVER=$! 
+async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" echo Performance test of Async to Async -test -x async/server_async_main.exe && test -x async/client_async_main.exe || exit 1 -async/server_async_main.exe -path "$SPATH" -concurrent & -async/client_async_main.exe -path "$SPATH" -secs 5 -sleep 2 +async/server_async_main.exe -path "${SPATH}" -concurrent & +SERVER=$! +async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" +wait "${SERVER}" -../cli/main.exe shutdown --path "$SPATH" -sleep 2 +../cli/main.exe shutdown --path "${SPATH}" From 30f24ac5195b7e98b5a80ea274249b6fc88a2d69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 3 Sep 2024 18:45:28 +0100 Subject: [PATCH 296/341] [maintenance]: vhd_format_lwt_test: speed up by using Cstruct.compare MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of comparing byte by byte use the more efficient comparison function that cstruct provides. This speeds up the test (from ~40s to ~33s). 
Signed-off-by: Edwin Török --- ocaml/libs/vhd/vhd_format/f.ml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ocaml/libs/vhd/vhd_format/f.ml b/ocaml/libs/vhd/vhd_format/f.ml index 6109c8aa713..e3bfc97a1fe 100644 --- a/ocaml/libs/vhd/vhd_format/f.ml +++ b/ocaml/libs/vhd/vhd_format/f.ml @@ -31,11 +31,7 @@ exception Cstruct_differ let cstruct_equal a b = let check_contents a b = try - for i = 0 to Cstruct.length a - 1 do - let a' = Cstruct.get_char a i in - let b' = Cstruct.get_char b i in - if a' <> b' then raise Cstruct_differ - done ; + if Cstruct.compare a b <> 0 then raise Cstruct_differ ; true with _ -> false in From 08eaef68ba9d6762933c9a356fbff0441652fed9 Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Thu, 25 Apr 2024 09:36:28 +0100 Subject: [PATCH 297/341] Split generation of Types.java into separate functions Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/main.ml | 391 +++++++++---------------------------- 1 file changed, 92 insertions(+), 299 deletions(-) diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index eaae4d2c1fb..7c95c20af8c 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -221,12 +221,8 @@ let field_default = function | Option _ -> "null" -(* Generate the class *) - let class_is_empty cls = cls.contents = [] - - (*This generates the special case code for marshalling the snapshot field in an Event.Record*) let generate_snapshot_hack = @@ -363,310 +359,107 @@ let rec gen_marshall_body = function let gen_error_field_name field = camel_case (String.concat "_" (Astring.String.cuts ~sep:" " field)) -let gen_error_field_names fields = List.map gen_error_field_name fields - -let gen_error_fields file field = - fprintf file " public final String %s;\n" field - -let gen_error file name params = - let name = exception_class_case name in - let fields = gen_error_field_names params.err_params in - let constructor_params = - String.concat ", " (List.map (fun field -> "String " 
^ field) fields) - in - - fprintf file " /**\n" ; - fprintf file " * %s\n" (escape_xml params.err_doc) ; - fprintf file " */\n" ; - fprintf file " public static class %s extends XenAPIException {\n" name ; - - List.iter (gen_error_fields file) fields ; - - fprintf file "\n /**\n" ; - fprintf file " * Create a new %s\n" name ; - fprintf file " */\n" ; - fprintf file " public %s(%s) {\n" name constructor_params ; - fprintf file " super(\"%s\");\n" (escape_xml params.err_doc) ; - - List.iter (fun s -> fprintf file " this.%s = %s;\n" s s) fields ; - - fprintf file " }\n\n" ; - fprintf file " }\n\n" - -let gen_method_error_throw file name error = - let class_name = exception_class_case name in - let paramsStr = - String.concat ", " - (List.map - (fun i -> sprintf "p%i" i) - (range (List.length error.err_params)) - ) - in - - fprintf file " if (errorName.equals(\"%s\")){\n" name ; - - (* Prepare the parameters to the Exception constructor *) - List.iter - (fun i -> - fprintf file - " String p%i = errorData.length > %i ? errorData[%i] : \"\";\n" - i i i - ) - (range (List.length error.err_params)) ; - - fprintf file " throw new Types.%s(%s);\n" class_name paramsStr ; - fprintf file " }\n" - -let gen_types_class folder = - let class_name = "Types" in - let file = open_out (Filename.concat folder class_name ^ ".java") in - print_license file ; - fprintf file - {|package com.xensource.xenapi; -import java.util.*; -import com.fasterxml.jackson.annotation.JsonEnumDefaultValue; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.io.IOException; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * This class holds enum types and exceptions. 
- */ -public class Types -{ - /** - * Interface for all Record classes - */ - public interface Record - { - /** - * Convert a Record to a Map - */ - Map toMap(); - } - /** - * Base class for all XenAPI Exceptions - */ - public static class XenAPIException extends IOException { - public final String shortDescription; - public final String[] errorDescription; - XenAPIException(String shortDescription) - { - this.shortDescription = shortDescription; - this.errorDescription = null; - } - XenAPIException(String[] errorDescription) - { - this.errorDescription = errorDescription; - if (errorDescription.length > 0) - { - shortDescription = errorDescription[0]; - } else - { - shortDescription = ""; - } - } - public String toString() - { - if (errorDescription == null) - { - return shortDescription; - } else if (errorDescription.length == 0) - { - return ""; - } - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < errorDescription.length - 1; i++) - { - sb.append(errorDescription[i]); - } - sb.append(errorDescription[errorDescription.length - 1]); - return sb.toString(); - } - } - - /** - * Thrown if the response from the server contains an invalid status. - */ - public static class BadServerResponse extends XenAPIException - { - public BadServerResponse(JsonRpcResponseError responseError) - { - super(String.valueOf(responseError)); - } - } -|} ; - - fprintf file - {| /** - * Checks the provided server response was successful. If the call - * failed, throws a XenAPIException. If the server - * returned an invalid response, throws a BadServerResponse. - * Otherwise, returns the server response as passed in. 
- */ - public static void checkError(JsonRpcResponseError response) throws XenAPIException, BadServerResponse - { - var errorData = response.data; - if(errorData.length == 0){ - throw new BadServerResponse(response); - } - var errorName = response.message; -|} ; - - Hashtbl.iter (gen_method_error_throw file) Datamodel.errors ; - - fprintf file - {| - // An unknown error occurred - throw new Types.XenAPIException(errorData); -} - -|} ; - - gen_enums file ; - fprintf file "\n" ; - Hashtbl.iter (gen_error file) Datamodel.errors ; - fprintf file "\n" ; - TypeSet.iter (gen_marshall_func file) !types ; - fprintf file "\n" ; - TypeSet.iter (gen_task_result_func file) !types ; - fprintf file - {| - - public static class BadAsyncResult extends XenAPIException - { - public final String result; - - public BadAsyncResult(String result) - { - super(result); - this.result = result; - } - } - - private static String parseResult(String result) throws BadAsyncResult - { - Pattern pattern = Pattern.compile("(.*)"); - Matcher matcher = pattern.matcher(result); - if (!matcher.find() || matcher.groupCount() != 1) { - throw new Types.BadAsyncResult("Can't interpret: " + result); - } - - return matcher.group(1); - } - - public static EventBatch toEventBatch(Object object) { - if (object == null) { - return null; - } - Map map = (Map) object; - EventBatch batch = new EventBatch(); - batch.token = toString(map.get("token")); - batch.validRefCounts = map.get("valid_ref_counts"); - batch.events = toSetOfEventRecord(map.get("events")); - return batch; - } -} -|} - -(* Now run it *) - let populate_releases templdir class_dir = render_file ("APIVersion.mustache", "APIVersion.java") json_releases templdir class_dir -let populate_types templdir class_dir = +(* + Populate JSON object for the Types.java template +*) +let get_types_errors_json = let list_errors = Hashtbl.fold (fun k v acc -> (k, v) :: acc) Datamodel.errors [] in - let errors = - List.map - (fun (_, error) -> - let class_name = 
exception_class_case error.err_name in - let err_params = - List.mapi - (fun index value -> - `O - [ - ("name", `String (gen_error_field_name value)) - ; ("index", `Float (Int.to_float index)) - ; ("last", `Bool (index == List.length error.err_params - 1)) - ] - ) - error.err_params - in - `O - [ - ("description", `String (escape_xml error.err_doc)) - ; ("class_name", `String class_name) - ; ("err_params", `A err_params) - ] - ) - list_errors - in + List.map + (fun (_, error) -> + let class_name = exception_class_case error.err_name in + let err_params = + List.mapi + (fun index value -> + `O + [ + ("name", `String (gen_error_field_name value)) + ; ("index", `Float (Int.to_float index)) + ; ("last", `Bool (index == List.length error.err_params - 1)) + ] + ) + error.err_params + in + `O + [ + ("description", `String (escape_xml error.err_doc)) + ; ("class_name", `String class_name) + ; ("err_params", `A err_params) + ] + ) + list_errors + +let get_types_enums_json = let list_enums = Hashtbl.fold (fun k v acc -> (k, v) :: acc) enums [] in - let enums = - List.map - (fun (enum_name, enum_values) -> - let class_name = class_case enum_name in - let mapped_values = - List.map - (fun (name, description) -> - let escaped_description = - global_replace (regexp_string "*/") "* /" description - in - let final_description = - global_replace (regexp_string "\n") "\n * " - escaped_description - in - `O - [ - ("name", `String name) - ; ("name_uppercase", `String (enum_of_wire name)) - ; ("description", `String final_description) - ] - ) - enum_values - in - `O [("class_name", `String class_name); ("values", `A mapped_values)] - ) - list_enums - in + List.map + (fun (enum_name, enum_values) -> + let class_name = class_case enum_name in + let mapped_values = + List.map + (fun (name, description) -> + let escaped_description = + global_replace (regexp_string "*/") "* /" description + in + let final_description = + global_replace (regexp_string "\n") "\n * " + escaped_description + 
in + `O + [ + ("name", `String name) + ; ("name_uppercase", `String (enum_of_wire name)) + ; ("description", `String final_description) + ] + ) + enum_values + in + `O [("class_name", `String class_name); ("values", `A mapped_values)] + ) + list_enums + +let get_types_json types = let list_types = TypeSet.fold (fun t acc -> t :: acc) !types [] in - let types = - List.map - (fun t -> - let type_string = get_java_type t in - let class_name = class_case type_string in - let method_name = get_marshall_function t in - (*Every type which may be returned by a function may also be the result of the*) - (* corresponding asynchronous task. We therefore need to generate corresponding*) - (* marshalling functions which can take the raw xml of the tasks result field*) - (* and turn it into the corresponding type. Luckily, the only things returned by*) - (* asynchronous tasks are object references and strings, so rather than implementing*) - (* the general recursive structure we'll just make one for each of the classes*) - (* that's been registered as a marshall-needing type*) - let generate_reference_task_result_func = - match t with Ref _ -> true | _ -> false - in - `O - [ - ("name", `String type_string) - ; ("class_name", `String class_name) - ; ("method_name", `String method_name) - ; ( "suppress_unchecked_warning" - , `Bool (match t with Map _ | Record _ -> true | _ -> false) - ) - ; ( "generate_reference_task_result_func" - , `Bool generate_reference_task_result_func - ) - ; ("method_body", `String (gen_marshall_body t)) - ] - ) - list_types - in + List.map + (fun t -> + let type_string = get_java_type t in + let class_name = class_case type_string in + let method_name = get_marshall_function t in + (*Every type which may be returned by a function may also be the result of the*) + (* corresponding asynchronous task. 
We therefore need to generate corresponding*) + (* marshalling functions which can take the raw xml of the tasks result field*) + (* and turn it into the corresponding type. Luckily, the only things returned by*) + (* asynchronous tasks are object references and strings, so rather than implementing*) + (* the general recursive structure we'll just make one for each of the classes*) + (* that's been registered as a marshall-needing type*) + let generate_reference_task_result_func = + match t with Ref _ -> true | _ -> false + in + `O + [ + ("name", `String type_string) + ; ("class_name", `String class_name) + ; ("method_name", `String method_name) + ; ( "suppress_unchecked_warning" + , `Bool (match t with Map _ | Record _ -> true | _ -> false) + ) + ; ( "generate_reference_task_result_func" + , `Bool generate_reference_task_result_func + ) + ; ("method_body", `String (gen_marshall_body t)) + ] + ) + list_types + +let populate_types types templdir class_dir = + let errors = get_types_errors_json in + let enums = get_types_enums_json in + let types = get_types_json types in let json = `O [("errors", `A errors); ("enums", `A enums); ("types", `A types)] in @@ -922,7 +715,7 @@ let _ = let templdir = "templates" in let class_dir = "autogen/xen-api/src/main/java/com/xensource/xenapi" in populate_releases templdir class_dir ; - populate_types templdir class_dir ; + populate_types types templdir class_dir ; List.iter (fun cls -> populate_class cls templdir class_dir) classes ; let uncommented_license = string_of_file "LICENSE" in From 268158af4e170496f55958dd2f158b9747246d03 Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Thu, 25 Apr 2024 10:01:09 +0100 Subject: [PATCH 298/341] Split generation of classes into separate functions Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/main.ml | 269 +++++++++++++++++++++---------------- 1 file changed, 153 insertions(+), 116 deletions(-) diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index 
7c95c20af8c..28f9c58051e 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -120,7 +120,9 @@ module TypeSet = Set.Make (Ty) let types = ref TypeSet.empty -(* Helper functions for types *) +(***************************************) +(* Helpers for generating Types.java *) +(***************************************) let rec get_java_type ty = types := TypeSet.add ty !types ; match ty with @@ -148,12 +150,9 @@ let rec get_java_type ty = | Option x -> get_java_type x -(*We'd like the list of XenAPI objects to appear as an enumeration so we can*) -(* switch on them, so add it using this mechanism*) let switch_enum = Enum ("XenAPIObjects", List.map (fun x -> (x.name, x.description)) classes) -(*Helper function for get_marshall_function*) let rec get_marshall_function_rec = function | SecretString | String -> "String" @@ -223,8 +222,6 @@ let field_default = function let class_is_empty cls = cls.contents = [] -(*This generates the special case code for marshalling the snapshot field in an Event.Record*) - let generate_snapshot_hack = {| Object a,b; @@ -364,9 +361,9 @@ let populate_releases templdir class_dir = ("APIVersion.mustache", "APIVersion.java") json_releases templdir class_dir -(* - Populate JSON object for the Types.java template -*) +(****************************************************) +(* Populate JSON object for the Types.java template *) +(****************************************************) let get_types_errors_json = let list_errors = Hashtbl.fold (fun k v acc -> (k, v) :: acc) Datamodel.errors [] @@ -465,37 +462,38 @@ let populate_types types templdir class_dir = in render_file ("Types.mustache", "Types.java") json templdir class_dir -let get_message_object cls message async_version params = - let is_method_async = async_version in - let return_type = - if is_method_async then - "Task" - else if - String.lowercase_ascii cls.name = "event" - && String.lowercase_ascii message.msg_name = "from" - then - "EventBatch" - else 
+(***************************************) +(* Helpers for generating class methods *) +(***************************************) +let get_message_return_type cls message is_method_async = + if is_method_async then + "Task" + else if + String.lowercase_ascii cls.name = "event" + && String.lowercase_ascii message.msg_name = "from" + then + "EventBatch" + else + get_java_type_or_void message.msg_result + +let get_message_return_description message = + match message.msg_result with + | None -> get_java_type_or_void message.msg_result - in - let return_description = - match message.msg_result with - | None -> - get_java_type_or_void message.msg_result - | Some (_, description) -> - description - in - let returns_void = message.msg_result = None && not async_version in - let record_parameters = - List.map - (fun parameter -> - `O [("name_camel", `String (camel_case parameter.param_name))] - ) - (List.filter - (function {param_type= Record _; _} -> true | _ -> false) - message.msg_params - ) - in + | Some (_, description) -> + description + +let get_message_return_parameters message = + List.map + (fun parameter -> + `O [("name_camel", `String (camel_case parameter.param_name))] + ) + (List.filter + (function {param_type= Record _; _} -> true | _ -> false) + message.msg_params + ) + +let get_message_deprecation_info message = let is_deprecated = match message.msg_release.internal_deprecated_since with | Some _ -> @@ -510,34 +508,37 @@ let get_message_object cls message async_version params = | None -> "" in - let type_reference = - if is_method_async then - "Task" - else if message.msg_result != None then - return_type - else - "" - in - let parameters = - List.map - (fun parameter -> - let publish_info = get_published_info_param message parameter in - let name_camel = camel_case parameter.param_name in - let description = escape_xml parameter.param_doc in - `O - [ - ("type", `String (get_java_type parameter.param_type)) - ; ( "is_record" - , `Bool - (match 
parameter.param_type with Record _ -> true | _ -> false) - ) - ; ("name_camel", `String name_camel) - ; ("description", `String description) - ; ("publish_info", `String publish_info) - ] - ) - params - in + (is_deprecated, deprecated_release) + +let get_message_type_reference is_method_async message return_type = + if is_method_async then + "Task" + else if message.msg_result != None then + return_type + else + "" + +let get_message_formatted_parameters parameters message = + List.map + (fun parameter -> + let publish_info = get_published_info_param message parameter in + let name_camel = camel_case parameter.param_name in + let description = escape_xml parameter.param_doc in + `O + [ + ("type", `String (get_java_type parameter.param_type)) + ; ( "is_record" + , `Bool + (match parameter.param_type with Record _ -> true | _ -> false) + ) + ; ("name_camel", `String name_camel) + ; ("description", `String description) + ; ("publish_info", `String publish_info) + ] + ) + parameters + +let get_message_errors message = let error_definitions = List.map (fun error -> @@ -546,14 +547,13 @@ let get_message_object cls message async_version params = ) message.msg_errors in - let errors = - List.map - (fun (name, description) -> - `O [("name", `String name); ("description", `String description)] - ) - error_definitions - in - let is_static = is_method_static message in + List.map + (fun (name, description) -> + `O [("name", `String name); ("description", `String description)] + ) + error_definitions + +let get_message_method_parameters parameters is_static message = let session_parameter = `O [ @@ -594,8 +594,25 @@ let get_message_object cls message async_version params = | `O h :: tail -> `O (("is_last", `Bool false) :: h) :: set_is_last tail acc in + set_is_last (extra_method_parameters @ parameters) [] + +let get_class_message_json cls message async_version params = + let is_method_async = async_version in + let return_type = get_message_return_type cls message 
is_method_async in + let return_description = get_message_return_description message in + let returns_void = message.msg_result = None && not async_version in + let record_parameters = get_message_return_parameters message in + let is_deprecated, deprecated_release = + get_message_deprecation_info message + in + let type_reference = + get_message_type_reference is_method_async message return_type + in + let parameters = get_message_formatted_parameters params message in + let errors = get_message_errors message in + let is_static = is_method_static message in let method_parameters = - set_is_last (extra_method_parameters @ parameters) [] + get_message_method_parameters parameters is_static message in `O [ @@ -620,9 +637,8 @@ let get_message_object cls message async_version params = ; ("errors", `A errors) ] -let populate_class cls templdir class_dir = +let get_class_fields_json cls = Hashtbl.replace records cls.name cls.contents ; - let class_name = class_case cls.name in let rec content_fields content namespace_name = match content with | Field f -> @@ -659,44 +675,65 @@ let populate_class cls templdir class_dir = | Namespace (name, contents) -> List.flatten (List.map (fun c -> content_fields c name) contents) in - let fields = - List.flatten (List.map (fun c -> content_fields c "") cls.contents) - in - let rec get_async_and_sync_methods methods acc = - match methods with - | [] -> - acc - | h :: tail -> - let get_variants messages = - (* we get the param groups outside of the mapping because we know it's always the same message *) - let params = gen_param_groups h h.msg_params in - match params with - | [] -> - List.map - (fun (message, is_async) -> (message, is_async, [])) - messages - | _ -> - List.map - (fun (message, is_async) -> - List.map (fun param -> (message, is_async, param)) params - ) - messages - |> List.flatten - in - if h.msg_async then - get_variants [(h, true); (h, false)] - @ get_async_and_sync_methods tail acc - else - get_variants [(h, false)] 
@ get_async_and_sync_methods tail acc - in - let async_and_sync_methods = get_async_and_sync_methods cls.messages [] in - let methods = - List.map - (fun (message, async_version, params) -> - get_message_object cls message async_version params - ) - async_and_sync_methods - in + List.flatten (List.map (fun c -> content_fields c "") cls.contents) + +(** [get_all_message_variants messages acc] takes a list of messages [messages] and an accumulator [acc], + and recursively constructs a list of tuples representing both asynchronous and synchronous variants of each message, + along with their associated parameters. If a message does not have an asynchronous version, this function simply returns + its synchronous version, with parameter information. + + For each message, if it has parameter information, the function generates all possible combinations of parameters + and pairs them with the message, marking each combination as either asynchronous or synchronous. Then, it constructs + a list of tuples containing each combination along with its associated message and its asynchronous/synchronous flag. 
+ + @param messages a list of messages to process + @param acc an accumulator for collecting the constructed tuples + @return a list of tuples representing both asynchronous and synchronous variants of each message, + along with their associated parameters *) +let rec get_all_message_variants messages acc = + match messages with + | [] -> + acc + | h :: tail -> + let get_variants messages = + (* we get the param groups outside of the mapping because we know it's always the same message *) + let params = gen_param_groups h h.msg_params in + match params with + | [] -> + List.map + (fun (message, is_async) -> (message, is_async, [])) + messages + | _ -> + List.map + (fun (message, is_async) -> + List.map (fun param -> (message, is_async, param)) params + ) + messages + |> List.flatten + in + if h.msg_async then + get_variants [(h, false); (h, true)] @ get_all_message_variants tail acc + else + get_variants [(h, false)] @ get_all_message_variants tail acc + +let get_class_methods_json cls = + let messages = get_all_message_variants cls.messages [] in + List.map + (fun (message, async_version, params) -> + get_class_message_json cls message async_version params + ) + messages + +(***********************************************) +(* Populate JSON object for the class template *) +(***********************************************) +let populate_class cls templdir class_dir = + (*todo: is this still neeeded?!*) + Hashtbl.replace records cls.name cls.contents ; + + let class_name = class_case cls.name in + let fields = get_class_fields_json cls in + let methods = get_class_methods_json cls in let json = `O [ From a899ee6d42ca6ee98c89edb91dabb149899f94ab Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Thu, 25 Apr 2024 13:38:45 +0100 Subject: [PATCH 299/341] Add misc formatting adjustments Also adds a better definition of the compare function for module Ty. The previous one was leaving duplicates in the set by relying on the underlying `Set.compare`. 
Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/main.ml | 157 ++++++++++++++------ ocaml/sdk-gen/java/templates/Class.mustache | 65 ++++---- ocaml/sdk-gen/java/templates/Types.mustache | 56 ++++--- 3 files changed, 166 insertions(+), 112 deletions(-) diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index 28f9c58051e..58254d3517b 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -95,6 +95,15 @@ let camel_case s = in keyword_map result +let rec set_is_last params acc = + match params with + | [] -> + [] + | `O last :: [] -> + `O (("is_last", `Bool true) :: last) :: acc + | `O h :: tail -> + `O (("is_last", `Bool false) :: h) :: set_is_last tail acc + let exception_class_case x = String.concat "" (List.map @@ -109,11 +118,49 @@ let enums = Hashtbl.create 10 let records = Hashtbl.create 10 -(*We want an empty mutable set to keep the types in.*) +(** Module Ty: Representation an empty mutable set to keep the types in. *) module Ty = struct type t = DT.ty - let compare = compare + (** [stringify_type ty] converts a type [ty] into its string representation. + This aids in comparisons for the [compare] function. For generating string types + please use [get_java_type] instead. + @param ty The type to convert into a string representation. + @return A string representing the type [ty]. *) + let rec stringify_type ty = + match ty with + | SecretString | String -> + "String" + | Int -> + "Long" + | Float -> + "Double" + | Bool -> + "Boolean" + | DateTime -> + "Date" + | Enum (name, _) -> + sprintf "Types.%s" name + | Set t1 -> + sprintf "Set<%s>" (stringify_type t1) + | Map (t1, t2) -> + sprintf "Map<%s, %s>" (stringify_type t1) (stringify_type t2) + | Ref x -> + x + | Record x -> + sprintf "%s.Record" x + | Option x -> + stringify_type x + + (** [compare a1 a2] compares two types [a1] and [a2] based on their string representations. + It first converts the types into strings using [stringify_type], then compares the strings. 
+ @param a1 The first type to compare. + @param a2 The second type to compare. + @return An integer representing the result of the comparison: + - 0 if [a1] is equal to [a2]. + - a negative integer if [a1] is less than [a2]. + - a positive integer if [a1] is greater than [a2]. *) + let compare a1 a2 = String.compare (stringify_type a1) (stringify_type a2) end module TypeSet = Set.Make (Ty) @@ -254,7 +301,7 @@ let gen_marshall_record_field prefix field = let ty = get_marshall_function field.ty in let name = String.concat "_" (List.rev (field.field_name :: prefix)) in let name' = camel_case name in - " record." ^ name ^ " = " ^ ty ^ "(map.get(\"" ^ name' ^ "\"));\n" + " record." ^ name' ^ " = " ^ ty ^ "(map.get(\"" ^ name ^ "\"));" let rec gen_marshall_record_namespace prefix (name, contents) = String.concat "\n" @@ -270,13 +317,13 @@ and gen_marshall_record_contents prefix = function let rec gen_marshall_body = function | SecretString | String -> - "return (String) object;\n" + "return (String) object;" | Int -> - "return Long.valueOf((String) object);\n" + "return Long.valueOf((String) object);" | Float -> - "return (Double) object;\n" + "return (Double) object;" | Bool -> - "return (Boolean) object;\n" + "return (Boolean) object;" | DateTime -> {| try { @@ -287,13 +334,13 @@ let rec gen_marshall_body = function return (new Date((long) (1000*Double.parseDouble((String) object)))); }|} | Ref ty -> - "return new" ^ class_case ty ^ "((String) object);\n" + "return new " ^ class_case ty ^ "((String) object);" | Enum (name, _) -> {|try { return |} ^ class_case name ^ {|.valueOf(((String) object).toUpperCase().replace('-','_')); - } catch (IllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { return |} ^ class_case name ^ {|.UNRECOGNIZED; @@ -304,13 +351,13 @@ let rec gen_marshall_body = function {|Object[] items = (Object[]) object; Set<|} ^ ty_name - ^ {|> result = new LinkedHashSet<>(); + ^ {|> result = new LinkedHashSet<>(); for(Object item: items) 
{ |} ^ ty_name ^ {| typed = |} ^ marshall_fn - ^ {|(item); + ^ {|(item); result.add(typed); } return result;|} @@ -324,7 +371,7 @@ let rec gen_marshall_body = function ^ ty_name ^ {|,|} ^ ty_name' - ^ {|>(); + ^ {|>(); for(var entry: map.entrySet()) { var key = |} ^ marshall_fn @@ -338,18 +385,22 @@ let rec gen_marshall_body = function | Record ty -> let contents = Hashtbl.find records ty in let cls_name = class_case ty in - {|Map map = (Map) object;|} + "Map map = (Map) object;\n" + ^ " " ^ cls_name ^ {|.Record record = new |} ^ cls_name - ^ {| .Record(); |} - ^ String.concat "" (List.map (gen_marshall_record_contents []) contents) - ^ - (*Event.Record needs a special case to handle snapshots*) - if ty = "event" then - generate_snapshot_hack - else - " return record;" + ^ ".Record();\n" + ^ String.concat "\n" (List.map (gen_marshall_record_contents []) contents) + ^ ( if + (*Event.Record needs a special case to handle snapshots*) + ty = "event" + then + generate_snapshot_hack + else + "" + ) + ^ " \n return record;" | Option ty -> gen_marshall_body ty @@ -377,7 +428,7 @@ let get_types_errors_json = `O [ ("name", `String (gen_error_field_name value)) - ; ("index", `Float (Int.to_float index)) + ; ("index", `Float (Int.to_float (index + 1))) ; ("last", `Bool (index == List.length error.err_params - 1)) ] ) @@ -385,17 +436,18 @@ let get_types_errors_json = in `O [ - ("description", `String (escape_xml error.err_doc)) + ("name", `String error.err_name) + ; ("description", `String (escape_xml error.err_doc)) ; ("class_name", `String class_name) ; ("err_params", `A err_params) ] ) list_errors + |> List.rev -let get_types_enums_json = - let list_enums = Hashtbl.fold (fun k v acc -> (k, v) :: acc) enums [] in +let get_types_enums_json types = List.map - (fun (enum_name, enum_values) -> + (fun (_, enum_name, enum_values) -> let class_name = class_case enum_name in let mapped_values = List.map @@ -416,12 +468,16 @@ let get_types_enums_json = ) enum_values in - `O 
[("class_name", `String class_name); ("values", `A mapped_values)] + let mapped_values_with_is_last = set_is_last mapped_values [] in + `O + [ + ("class_name", `String class_name) + ; ("values", `A mapped_values_with_is_last) + ] ) - list_enums + types let get_types_json types = - let list_types = TypeSet.fold (fun t acc -> t :: acc) !types [] in List.map (fun t -> let type_string = get_java_type t in @@ -443,7 +499,13 @@ let get_types_json types = ; ("class_name", `String class_name) ; ("method_name", `String method_name) ; ( "suppress_unchecked_warning" - , `Bool (match t with Map _ | Record _ -> true | _ -> false) + , `Bool + ( match t with + | Map _ | Record _ | Option (Record _) | Option (Map _) -> + true + | _ -> + false + ) ) ; ( "generate_reference_task_result_func" , `Bool generate_reference_task_result_func @@ -451,14 +513,23 @@ let get_types_json types = ; ("method_body", `String (gen_marshall_body t)) ] ) - list_types + types let populate_types types templdir class_dir = + (* we manually add switch_enum here so it's added as an enum in Types.java *) + let list_types = TypeSet.fold (fun t acc -> t :: acc) !types [switch_enum] in + let sort_types ty1 ty2 = Ty.compare ty1 ty2 in + let list_sorted_types = List.sort sort_types list_types in + let list_sorted_enums = + List.filter_map + (fun x -> match x with Enum (name, ls) -> Some (x, name, ls) | _ -> None) + list_sorted_types + in + let types_json = get_types_json list_sorted_types in let errors = get_types_errors_json in - let enums = get_types_enums_json in - let types = get_types_json types in + let enums = get_types_enums_json list_sorted_enums in let json = - `O [("errors", `A errors); ("enums", `A enums); ("types", `A types)] + `O [("errors", `A errors); ("enums", `A enums); ("types", `A types_json)] in render_file ("Types.mustache", "Types.java") json templdir class_dir @@ -532,7 +603,9 @@ let get_message_formatted_parameters parameters message = (match parameter.param_type with Record _ -> true | _ 
-> false) ) ; ("name_camel", `String name_camel) - ; ("description", `String description) + ; ( "description" + , `String (if description = "" then "No description" else description) + ) ; ("publish_info", `String publish_info) ] ) @@ -585,15 +658,6 @@ let get_message_method_parameters parameters is_static message = | false, false -> [non_static_reference_parameter] in - let rec set_is_last params acc = - match params with - | [] -> - [] - | `O last :: [] -> - `O (("is_last", `Bool true) :: last) :: acc - | `O h :: tail -> - `O (("is_last", `Bool false) :: h) :: set_is_last tail acc - in set_is_last (extra_method_parameters @ parameters) [] let get_class_message_json cls message async_version params = @@ -728,9 +792,6 @@ let get_class_methods_json cls = (* Populate JSON object for the class template *) (***********************************************) let populate_class cls templdir class_dir = - (*todo: is this still neeeded?!*) - Hashtbl.replace records cls.name cls.contents ; - let class_name = class_case cls.name in let fields = get_class_fields_json cls in let methods = get_class_methods_json cls in @@ -752,8 +813,8 @@ let _ = let templdir = "templates" in let class_dir = "autogen/xen-api/src/main/java/com/xensource/xenapi" in populate_releases templdir class_dir ; - populate_types types templdir class_dir ; List.iter (fun cls -> populate_class cls templdir class_dir) classes ; + populate_types types templdir class_dir ; let uncommented_license = string_of_file "LICENSE" in let class_license = open_out "autogen/xen-api/src/main/resources/LICENSE" in diff --git a/ocaml/sdk-gen/java/templates/Class.mustache b/ocaml/sdk-gen/java/templates/Class.mustache index 986a1fbee79..658deeb05f2 100644 --- a/ocaml/sdk-gen/java/templates/Class.mustache +++ b/ocaml/sdk-gen/java/templates/Class.mustache @@ -56,28 +56,28 @@ public class {{class_name}} extends XenAPIObject { {{/is_empty_class}} {{^is_empty_class}} /** - * The XenAPI reference (OpaqueRef) to this object. 
- */ + * The XenAPI reference (OpaqueRef) to this object. + */ protected final String ref; /** - * For internal use only. - */ + * For internal use only. + */ {{{class_name}}}(String ref) { this.ref = ref; } /** - * @return The XenAPI reference (OpaqueRef) to this object. - */ + * @return The XenAPI reference (OpaqueRef) to this object. + */ @JsonValue public String toWireString() { return this.ref; } /** - * If obj is a {{{class_name}}}, compares XenAPI references for equality. - */ + * If obj is a {{{class_name}}}, compares XenAPI references for equality. + */ @Override public boolean equals(Object obj) { @@ -105,25 +105,22 @@ public class {{class_name}} extends XenAPIObject { StringWriter writer = new StringWriter(); PrintWriter print = new PrintWriter(writer); {{#fields}} - print.printf("%1$20s: %2$s\n", "{{{name_camel}}}", this.{{{name_camel}}}); + print.printf("%1$20s: %2$s\n", "{{{name_camel}}}", this.{{{name_camel}}}); {{/fields}} {{#is_event_class}} - print.printf("%1$20s: %2$s\n", "snapshot", this.snapshot); + print.printf("%1$20s: %2$s\n", "snapshot", this.snapshot); {{/is_event_class}} return writer.toString(); } /** - * Convert a {{{class_name}}}.Record to a Map - */ + * Convert a {{{class_name}}}.Record to a Map + */ public Map toMap() { var map = new HashMap(); {{#fields}} map.put("{{{name}}}", this.{{{name_camel}}} == null ? 
{{{default_value}}} : this.{{{name_camel}}}); {{/fields}} - {{#is_event_class}} - print.printf("%1$20s: %2$s\n", "snapshot", this.snapshot); - {{/is_event_class}} return map; } @@ -149,30 +146,30 @@ public class {{class_name}} extends XenAPIObject { {{/is_empty_class}} {{#methods}} /** - * {{{description}}} - * Minimum allowed role: {{{minimum_allowed_role}}} - * {{{publish_info}}}{{#is_deprecated}} - * @deprecated since {{{deprecated_release}}}{{/is_deprecated}} - * - * @param c The connection the call is made on{{#parameters}} - * @param {{{name_camel}}} {{^description}}No description{{/description}}{{#description}}{{{.}}}{{/description}} {{{publish_info}}}{{/parameters}}{{^returns_void}} - * @return {{#is_async}}Task{{/is_async}}{{^is_async}}{{{return_description}}}{{/is_async}}{{/returns_void}} - * @throws BadServerResponse Thrown if the response from the server contains an invalid status. - * @throws XenAPIException if the call failed. - * @throws IOException if an error occurs during a send or receive. This includes cases where a payload is invalid JSON.{{#errors}} - * @throws {{{name}}} {{{description}}}{{/errors}} - */{{#is_deprecated}} + * {{{description}}} + * Minimum allowed role: {{{minimum_allowed_role}}} + * {{{publish_info}}}{{#is_deprecated}} + * @deprecated since {{{deprecated_release}}}{{/is_deprecated}} + * + * @param c The connection the call is made on{{#parameters}} + * @param {{{name_camel}}} {{{description}}} {{{publish_info}}}{{/parameters}}{{^returns_void}} + * @return {{#is_async}}Task{{/is_async}}{{^is_async}}{{{return_description}}}{{/is_async}}{{/returns_void}} + * @throws BadServerResponse Thrown if the response from the server contains an invalid status. + * @throws XenAPIException if the call failed. + * @throws IOException if an error occurs during a send or receive. 
This includes cases where a payload is invalid JSON.{{#errors}} + * @throws {{{name}}} {{{description}}}{{/errors}} + */{{#is_deprecated}} @Deprecated(since = "{{{deprecated_release}}}"){{/is_deprecated}} public{{#is_static}} static{{/is_static}} {{#is_async}}Task{{/is_async}}{{^is_async}}{{{return_type}}}{{/is_async}} {{name_camel}}{{#is_async}}Async{{/is_async}}(Connection c{{#parameters}}, {{{type}}} {{{name_camel}}}{{/parameters}}) throws - BadServerResponse, - XenAPIException, - IOException{{#errors}}, + BadServerResponse, + XenAPIException, + IOException{{#errors}}, {{name}}{{/errors}} { - String methodCall = "{{#is_async}}Async.{{/is_async}}{{{object_name}}}.{{{name}}}"; - {{#supports_session}}String sessionReference = c.getSessionReference();{{/supports_session}}{{#method_parameters}}{{#is_record}} + String methodCall = "{{#is_async}}Async.{{/is_async}}{{{object_name}}}.{{{name}}}";{{#supports_session}} + String sessionReference = c.getSessionReference();{{/supports_session}}{{#method_parameters}}{{#is_record}} var {{{name_camel}}}_map = {{{name_camel}}}.toMap();{{/is_record}}{{/method_parameters}} Object[] methodParameters = { {{#method_parameters}}{{{name_camel}}}{{#is_record}}_map{{/is_record}}{{^is_last}}, {{/is_last}}{{/method_parameters}} };{{#type_reference}} - var typeReference = new TypeReference<{{{type_reference}}}>(){};{{/type_reference}} + var typeReference = new TypeReference<{{{.}}}>(){};{{/type_reference}} {{^returns_void}}return {{/returns_void}}c.dispatch(methodCall, methodParameters{{#type_reference}}, typeReference{{/type_reference}}); } diff --git a/ocaml/sdk-gen/java/templates/Types.mustache b/ocaml/sdk-gen/java/templates/Types.mustache index 6af5336a3d0..9efae5e0d0c 100644 --- a/ocaml/sdk-gen/java/templates/Types.mustache +++ b/ocaml/sdk-gen/java/templates/Types.mustache @@ -101,7 +101,6 @@ public class Types super(String.valueOf(responseError)); } } - /** * Checks the provided server response was successful. 
If the call * failed, throws a XenAPIException. If the server @@ -117,13 +116,12 @@ public class Types var errorName = response.message; {{#errors}} - if (errorName.equals("{{class_name}}")){ + if (errorName.equals("{{{name}}}")){ {{#err_params}} - String p{{index}} = errorData.length > {{index}} ? errorData[{{index}}] : ""; + String p{{{index}}} = errorData.length > {{{index}}} ? errorData[{{{index}}}] : ""; {{/err_params}} - throw new Types.{{class_name}}({{#err_params}}p{{index}}{{^last}}, {{/last}}{{/err_params}}); + throw new Types.{{{class_name}}}({{#err_params}}p{{{index}}}{{^last}}, {{/last}}{{/err_params}}); } - {{/errors}} // An unknown error occurred @@ -131,7 +129,7 @@ public class Types } {{#enums}} - public enum {{class_name}} { + public enum {{{class_name}}} { /** * The value does not belong to this enumeration */ @@ -139,62 +137,60 @@ public class Types UNRECOGNIZED, {{#values}} /** - * {{description}} + * {{{description}}} */ - @JsonProperty("{{name}}") - {{name_uppercase}}; + @JsonProperty("{{{name}}}") + {{{name_uppercase}}}{{^is_last}},{{/is_last}}{{#is_last}};{{/is_last}} {{/values}} public String toString() { - if (this == UNRECOGNIZED) return "UNRECOGNIZED"; - {{#values}} - if (this == {{name_uppercase}}) return "{{name}}"; - {{/values}} - + if (this == UNRECOGNIZED) return "UNRECOGNIZED";{{#values}} + if (this == {{{name_uppercase}}}) return "{{{name}}}";{{/values}} /* This can never be reached */ return "illegal enum"; } + } {{/enums}} - {{#errors}} /** - * {{description}} + * {{{description}}} */ - public static class {{class_name}} extends XenAPIException { - public final String proxies; + public static class {{{class_name}}} extends XenAPIException { + {{#err_params}} + public final String {{{name}}}; + {{/err_params}} /** - * Create a new {{class_name}} + * Create a new {{{class_name}}} */ - public {{class_name}}({{#err_params}}String {{name}}{{^last}}, {{/last}}{{/err_params}}) { - super("The PVS site contains running proxies.");# + 
public {{{class_name}}}({{#err_params}}String {{{name}}}{{^last}}, {{/last}}{{/err_params}}) { + super("{{{description}}}"); {{#err_params}} - this.{{name}} = {{name}}; + this.{{{name}}} = {{{name}}}; {{/err_params}} } } - {{/errors}} {{#types}} /** * Converts an {@link Object} to a {@link {{{name}}}} object. *
- * This method takes an {@link Object} as input and attempts to convert it into a {@link name} object. - * If the input object is null, the method returns null. Otherwise, it creates a new {@link name} + * This method takes an {@link Object} as input and attempts to convert it into a {@link {{{name}}}} object. + * If the input object is null, the method returns null. Otherwise, it creates a new {@link {{{name}}}} * object using the input object's {@link String} representation. *
- * @param object The {@link Object} to be converted to a {@link name} object. - * @return A {@link name} object created from the input {@link Object}'s {@link String} representation, + * @param object The {@link Object} to be converted to a {@link {{{name}}}} object. + * @return A {@link {{{name}}}} object created from the input {@link Object}'s {@link String} representation, * or null if the input object is null. * @deprecated this method will not be publicly exposed in future releases of this package. */ @Deprecated{{#suppress_unchecked_warning}} @SuppressWarnings("unchecked"){{/suppress_unchecked_warning}} - public static {{{name}}} {{method_name}}(Object object) { - if(object == null){ + public static {{{name}}} {{{method_name}}}(Object object) { + if (object == null) { return null; } {{{method_body}}} @@ -253,4 +249,4 @@ public class Types batch.events = toSetOfEventRecord(map.get("events")); return batch; } -} \ No newline at end of file +} From e72b173dcd968519a06f405784d57f1257a1edd0 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Thu, 5 Sep 2024 14:45:27 +0100 Subject: [PATCH 300/341] Do not ignore output of executing hooks This might be useful for debugging. 
Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_hooks.ml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ocaml/xapi/xapi_hooks.ml b/ocaml/xapi/xapi_hooks.ml index abb29dd4f52..4e09084100c 100644 --- a/ocaml/xapi/xapi_hooks.ml +++ b/ocaml/xapi/xapi_hooks.ml @@ -71,11 +71,15 @@ let execute_hook ~__context ~script_name ~args ~reason = try debug "Executing hook '%s/%s' with args [ %s ]" script_name script (String.concat "; " args) ; - ignore - (Forkhelpers.execute_command_get_output - (Filename.concat script_dir script) - args - ) + let os, es = + Forkhelpers.execute_command_get_output + (Filename.concat script_dir script) + args + in + debug + "%s: Output of executing hook '%s/%s' with args [ %s ] is %s, err is \ + %s" + __FUNCTION__ script_name script (String.concat "; " args) os es with | Forkhelpers.Spawn_internal_error (_, stdout, Unix.WEXITED i) (* i<>0 since that case does not generate exn *) From ca7a981b3e57e0de3a930d6d100a2e967f8bd229 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Thu, 5 Sep 2024 14:46:01 +0100 Subject: [PATCH 301/341] New hook to execute before xapi shuts down Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_hooks.ml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ocaml/xapi/xapi_hooks.ml b/ocaml/xapi/xapi_hooks.ml index 4e09084100c..ecc1a258063 100644 --- a/ocaml/xapi/xapi_hooks.ml +++ b/ocaml/xapi/xapi_hooks.ml @@ -20,6 +20,8 @@ let scriptname__host_pre_declare_dead = "host-pre-declare-dead" let scriptname__host_post_declare_dead = "host-post-declare-dead" +let scriptname__xapi_pre_shutdown = "xapi-pre-shutdown" + (* Host Script hook reason codes *) let reason__fenced = "fenced" @@ -127,6 +129,12 @@ let host_pre_declare_dead ~__context ~host ~reason = () ) +let xapi_pre_shutdown ~__context ~host ~reason = + info "%s Running xapi pre shutdown hooks for %s" __FUNCTION__ + (Ref.string_of host) ; + execute_host_hook ~__context ~script_name:scriptname__xapi_pre_shutdown + ~reason ~host + (* Called when 
host died -- !! hook code in here to abort outstanding forwarded ops *) let internal_host_dead_hook __context host = info "Running host dead hook for %s" (Ref.string_of host) ; From c13b8da4a365f60f9d4b4a61a2ab3a0bb58fbc35 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Thu, 5 Sep 2024 14:46:43 +0100 Subject: [PATCH 302/341] CA-397788: Execute pre shutdown hook for xapi This code _synchronously_ executes the script inside the directory `/etc/xapi.d/xapi_pre_shutdown` and wait for them to complete before xapi shuts itself down. This is to allow any clean up work to be done, for example, the SM GC process to finish. Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_host.ml | 3 +++ ocaml/xapi/xapi_host.mli | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index 555cb3bb67c..897e8572e13 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -778,6 +778,9 @@ let restart_agent ~__context ~host:_ = let shutdown_agent ~__context = debug "Host.restart_agent: Host agent will shutdown in 1s!!!!" ; + let localhost = Helpers.get_localhost ~__context in + Xapi_hooks.xapi_pre_shutdown ~__context ~host:localhost + ~reason:Xapi_hooks.reason__clean_shutdown ; Xapi_fuse.light_fuse_and_dont_restart ~fuse_length:1. 
() let disable ~__context ~host = diff --git a/ocaml/xapi/xapi_host.mli b/ocaml/xapi/xapi_host.mli index 39f20223c13..0b61af641a2 100644 --- a/ocaml/xapi/xapi_host.mli +++ b/ocaml/xapi/xapi_host.mli @@ -77,7 +77,7 @@ val retrieve_wlb_evacuate_recommendations : val restart_agent : __context:'a -> host:'b -> unit -val shutdown_agent : __context:'a -> unit +val shutdown_agent : __context:Context.t -> unit val disable : __context:Context.t -> host:[`host] Ref.t -> unit From e84d92d25dcea22fda5963b7f86b9723cb70ed20 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Mon, 9 Sep 2024 17:00:58 +0100 Subject: [PATCH 303/341] Add sr to the Sr_unhealthy error constructor Where the first parameter represents the uuid of the SR, later used in generating the API error. Signed-off-by: Vincent Liu --- ocaml/xapi-idl/storage/storage_interface.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi-idl/storage/storage_interface.ml b/ocaml/xapi-idl/storage/storage_interface.ml index 02513ae4936..4b3e03e48e9 100644 --- a/ocaml/xapi-idl/storage/storage_interface.ml +++ b/ocaml/xapi-idl/storage/storage_interface.ml @@ -354,7 +354,7 @@ module Errors = struct | Cancelled of string | Redirect of string option | Sr_attached of string - | Sr_unhealthy of sr_health + | Sr_unhealthy of string * sr_health | Unimplemented of string | Activated_on_another_host of uuid | Duplicated_key of string From b9e93695e4e39852fc5b9f1e1e734073b0b7cc04 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Wed, 21 Aug 2024 11:21:30 +0100 Subject: [PATCH 304/341] Add more description on sr health Aligned with https://xapi-project.github.io/xapi-storage/#volume-type-definitions Signed-off-by: Vincent Liu --- ocaml/idl/datamodel.ml | 9 +++++++-- ocaml/idl/schematest.ml | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 4c014ca939c..2efca2c2ce8 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -2791,8 +2791,13 
@@ module Sr_stat = struct , [ ("healthy", "Storage is fully available") ; ("recovering", "Storage is busy recovering, e.g. rebuilding mirrors.") - ; ("unreachable", "Storage is unreachable") - ; ("unavailable", "Storage is unavailable") + ; ( "unreachable" + , "Storage is unreachable but may be recoverable with admin \ + intervention" + ) + ; ( "unavailable" + , "Storage is unavailable, a host reboot will be required" + ) ] ) diff --git a/ocaml/idl/schematest.ml b/ocaml/idl/schematest.ml index 9b25ca48aee..c375a909149 100644 --- a/ocaml/idl/schematest.ml +++ b/ocaml/idl/schematest.ml @@ -3,7 +3,7 @@ let hash x = Digest.string x |> Digest.to_hex (* BEWARE: if this changes, check that schema has been bumped accordingly in ocaml/idl/datamodel_common.ml, usually schema_minor_vsn *) -let last_known_schema_hash = "ce370e3b85178acfbcfce4963c4f8534" +let last_known_schema_hash = "428caff23cdb969c59a9960beefd7bb6" let current_schema_hash : string = let open Datamodel_types in From 0b22e4f9b235bb697a7c6c68f6bbec0117f96910 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Mon, 9 Sep 2024 14:54:35 +0100 Subject: [PATCH 305/341] Refactor sr_scan2_impl This is to help handling of retry logic in the next commit. It also de-nest the code (a bit). 
Signed-off-by: Vincent Liu --- ocaml/xapi-storage-script/main.ml | 116 ++++++++++++++---------------- 1 file changed, 55 insertions(+), 61 deletions(-) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 196e3edfc07..b7bf12d13cc 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1208,69 +1208,63 @@ let bind ~volume_script_dir = in S.SR.scan sr_scan_impl ; let sr_scan2_impl dbg sr = + let get_sr_info sr = + return_volume_rpc (fun () -> Sr_client.stat (volume_rpc ~dbg) dbg sr) + >>>= fun response -> + Deferred.Result.return + { + Storage_interface.sr_uuid= response.Xapi_storage.Control.uuid + ; name_label= response.Xapi_storage.Control.name + ; name_description= response.Xapi_storage.Control.description + ; total_space= response.Xapi_storage.Control.total_space + ; free_space= response.Xapi_storage.Control.free_space + ; clustered= response.Xapi_storage.Control.clustered + ; health= + ( match response.Xapi_storage.Control.health with + | Xapi_storage.Control.Healthy _ -> + Healthy + | Xapi_storage.Control.Recovering _ -> + Recovering + | Xapi_storage.Control.Unreachable _ -> + Unreachable + | Xapi_storage.Control.Unavailable _ -> + Unavailable + ) + } + in + let get_volume_info sr sr_info = + return_volume_rpc (fun () -> + Sr_client.ls + (volume_rpc ~dbg ~compat_out:Compat.compat_out_volumes) + dbg sr + ) + >>>= fun response -> + let response = Array.to_list response in + (* Filter out volumes which are clone-on-boot transients *) + let transients = + List.fold + ~f:(fun set x -> + match + List.Assoc.find x.Xapi_storage.Control.keys _clone_on_boot_key + ~equal:String.equal + with + | None -> + set + | Some transient -> + Set.add set transient + ) + ~init:Core.String.Set.empty response + in + let response = + List.filter + ~f:(fun x -> not (Set.mem transients x.Xapi_storage.Control.key)) + response + in + Deferred.Result.return (List.map ~f:vdi_of_volume response, sr_info) + in 
Attached_SRs.find sr >>>= (fun sr -> - return_volume_rpc (fun () -> Sr_client.stat (volume_rpc ~dbg) dbg sr) - >>>= fun response -> - Deferred.Result.return - { - Storage_interface.sr_uuid= response.Xapi_storage.Control.uuid - ; name_label= response.Xapi_storage.Control.name - ; name_description= response.Xapi_storage.Control.description - ; total_space= response.Xapi_storage.Control.total_space - ; free_space= response.Xapi_storage.Control.free_space - ; clustered= response.Xapi_storage.Control.clustered - ; health= - ( match response.Xapi_storage.Control.health with - | Xapi_storage.Control.Healthy _ -> - Healthy - | Xapi_storage.Control.Recovering _ -> - Recovering - | Xapi_storage.Control.Unreachable _ -> - Unreachable - | Xapi_storage.Control.Unavailable _ -> - Unavailable - ) - } - >>>= fun sr_info -> - match sr_info.health with - | Healthy -> - return_volume_rpc (fun () -> - Sr_client.ls - (volume_rpc ~dbg ~compat_out:Compat.compat_out_volumes) - dbg sr - ) - >>>= fun response -> - let response = Array.to_list response in - (* Filter out volumes which are clone-on-boot transients *) - let transients = - List.fold - ~f:(fun set x -> - match - List.Assoc.find x.Xapi_storage.Control.keys - _clone_on_boot_key ~equal:String.equal - with - | None -> - set - | Some transient -> - Set.add set transient - ) - ~init:Core.String.Set.empty response - in - let response = - List.filter - ~f:(fun x -> - not (Set.mem transients x.Xapi_storage.Control.key) - ) - response - in - Deferred.Result.return - (List.map ~f:vdi_of_volume response, sr_info) - | health -> - debug "%s: sr unhealthy %s" __FUNCTION__ - (Storage_interface.show_sr_health health) ; - Deferred.Result.fail - Storage_interface.(Errors.Sr_unhealthy health) + get_sr_info sr >>>= fun sr_info -> get_volume_info sr sr_info ) |> wrap in From 9b5d66f9094d6e7f7e8569bf16c77962d4e01222 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Mon, 9 Sep 2024 14:57:07 +0100 Subject: [PATCH 306/341] CP-49448: Add handling logic for 
SR health state For SMAPIv3 SRs, we add additional logic to handle the health state returned by SR.stat in sr_scan2_impl. This takes advantage of the additional information returned by the storage layer. Specifically, two new health states are handled as follows: - Unreachable: retry a few times before sending an error message to the user, suggesting retry later. - Unavailable: return an error message immediately and tell the user to reboot in order to recover from this error. Signed-off-by: Vincent Liu --- ocaml/idl/datamodel_errors.ml | 3 +++ ocaml/xapi-consts/api_errors.ml | 2 ++ ocaml/xapi-storage-script/main.ml | 24 +++++++++++++++++----- ocaml/xapi/storage_access.ml | 34 +++++++++++++++++++++++-------- 4 files changed, 50 insertions(+), 13 deletions(-) diff --git a/ocaml/idl/datamodel_errors.ml b/ocaml/idl/datamodel_errors.ml index 3071a4add47..aead3e0abc4 100644 --- a/ocaml/idl/datamodel_errors.ml +++ b/ocaml/idl/datamodel_errors.ml @@ -1256,6 +1256,9 @@ let _ = () ; error Api_errors.sr_is_cache_sr ["host"] ~doc:"The SR is currently being used as a local cache SR." () ; + error Api_errors.sr_unhealthy ["sr"; "health"; "fix"] + ~doc:"The SR is currently unhealthy. See the suggestion on how to fix it." + () ; error Api_errors.clustered_sr_degraded ["sr"] ~doc: "An SR is using clustered local storage. 
It is not safe to reboot a host \ diff --git a/ocaml/xapi-consts/api_errors.ml b/ocaml/xapi-consts/api_errors.ml index 53d9684561f..97880cde57a 100644 --- a/ocaml/xapi-consts/api_errors.ml +++ b/ocaml/xapi-consts/api_errors.ml @@ -512,6 +512,8 @@ let sr_requires_upgrade = add_error "SR_REQUIRES_UPGRADE" let sr_is_cache_sr = add_error "SR_IS_CACHE_SR" +let sr_unhealthy = add_error "SR_UNHEALTHY" + let vdi_in_use = add_error "VDI_IN_USE" let vdi_is_sharable = add_error "VDI_IS_SHARABLE" diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index b7bf12d13cc..74ea3bb8d9f 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1208,6 +1208,7 @@ let bind ~volume_script_dir = in S.SR.scan sr_scan_impl ; let sr_scan2_impl dbg sr = + let sr_uuid = Storage_interface.Sr.string_of sr in let get_sr_info sr = return_volume_rpc (fun () -> Sr_client.stat (volume_rpc ~dbg) dbg sr) >>>= fun response -> @@ -1262,11 +1263,24 @@ let bind ~volume_script_dir = in Deferred.Result.return (List.map ~f:vdi_of_volume response, sr_info) in - Attached_SRs.find sr - >>>= (fun sr -> - get_sr_info sr >>>= fun sr_info -> get_volume_info sr sr_info - ) - |> wrap + let rec stat_with_retry ?(times = 3) sr = + get_sr_info sr >>>= fun sr_info -> + match sr_info.health with + | Healthy -> + debug "%s sr %s is healthy" __FUNCTION__ sr_uuid ; + get_volume_info sr sr_info + | Unreachable when times > 0 -> + debug "%s: sr %s is unreachable, remaining %d retries" __FUNCTION__ + sr_uuid times ; + Clock.after Time.Span.second >>= fun () -> + stat_with_retry ~times:(times - 1) sr + | health -> + debug "%s: sr unhealthy because it is %s" __FUNCTION__ + (Storage_interface.show_sr_health health) ; + Deferred.Result.fail + Storage_interface.(Errors.Sr_unhealthy (sr_uuid, health)) + in + Attached_SRs.find sr >>>= stat_with_retry |> wrap in S.SR.scan2 sr_scan2_impl ; let vdi_create_impl dbg sr (vdi_info : Storage_interface.vdi_info) = diff --git 
a/ocaml/xapi/storage_access.ml b/ocaml/xapi/storage_access.ml index a307eb48bdd..c92651bc576 100644 --- a/ocaml/xapi/storage_access.ml +++ b/ocaml/xapi/storage_access.ml @@ -31,6 +31,11 @@ let s_of_vdi = Vdi.string_of let s_of_sr = Sr.string_of let transform_storage_exn f = + let get_sr_ref sr_uuid = + Server_helpers.exec_with_new_task "transform_storage_exn" (fun __context -> + Db.SR.get_by_uuid ~__context ~uuid:sr_uuid + ) + in try f () with | Storage_error (Backend_error (code, params)) as e -> Backtrace.reraise e (Api_errors.Server_error (code, params)) @@ -39,17 +44,30 @@ let transform_storage_exn f = let backtrace = Backtrace.Interop.of_json "SM" backtrace in Backtrace.add e backtrace ; Backtrace.reraise e (Api_errors.Server_error (code, params)) + | Storage_error (Sr_unhealthy (sr, health)) as e -> + let advice = + match health with + | Unavailable -> + "try reboot" + | Unreachable -> + "try again later" + | _health -> + "" + in + let sr = get_sr_ref sr in + Backtrace.reraise e + (Api_errors.Server_error + ( Api_errors.sr_unhealthy + , [Ref.string_of sr; Storage_interface.show_sr_health health; advice] + ) + ) | Api_errors.Server_error _ as e -> raise e | Storage_error (No_storage_plugin_for_sr sr) as e -> - Server_helpers.exec_with_new_task "transform_storage_exn" - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - Backtrace.reraise e - (Api_errors.Server_error - (Api_errors.sr_not_attached, [Ref.string_of sr]) - ) - ) + let sr = get_sr_ref sr in + Backtrace.reraise e + (Api_errors.Server_error (Api_errors.sr_not_attached, [Ref.string_of sr]) + ) | e -> Backtrace.reraise e (Api_errors.Server_error From 3ba6e6df2b321c0ff8323155532c84f325340208 Mon Sep 17 00:00:00 2001 From: Gabriel Buica Date: Tue, 3 Sep 2024 13:12:43 +0100 Subject: [PATCH 307/341] CP-51352: Compare before setting a new value in `last_active` Experimenting with xapi under different loads of `xe vm-list`s revealed that xapi often waits a considerable amount of time for 
the database lock. e.g. Attempting to start a VM under a constant load of 20 `xe vm-list`s can take up to 30s and the total amount of time that xapi requests would spend in `Db_actions.Session.set_last_active` would be close to 9s. It seems that each API call does a `Session_check.check` implicitly updating the last time the current session is been used. From what I gathered, this value is used for the garbage collection of session where the current default threshold value is 24 hours, `Xapi.globs.inactive_session_timeout`. To avoid holding the database lock to update the `last_active` field every time, this commit gets the current value of `last_active` and compares it with the current time. If the difference is greater than 10 minutes only then we set a new value in the field. Otherwise, we continue without writing in the database. This has the following improvement on the total time of `xe vm-start` under different loads: - 20 `xe vm-list`: - - Before: 30.670s | 30.086s | 30.958s | - - After: 28.185s | 28.383s | 28.909s | - 80 `xe vm-list`: - - Before: 2m38.425s | 2m43.539s | 2m39.045s | - - After: 1m46.266s | 1m49.327s | 1m36.825s | For smaller loads, the improvement is between 5-10%. Whereas for bigger loads, the improvement is much more visible. 
Signed-off-by: Gabriel Buica --- ocaml/xapi/session_check.ml | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi/session_check.ml b/ocaml/xapi/session_check.ml index 27812fc5244..63ac8fe6e95 100644 --- a/ocaml/xapi/session_check.ml +++ b/ocaml/xapi/session_check.ml @@ -53,9 +53,33 @@ let check ~intra_pool_only ~session_id ~action = if (not pool) && not (Pool_role.is_master ()) then raise Non_master_login_on_slave ; if Pool_role.is_master () then - Db_actions.DB_Action.Session.set_last_active ~__context - ~self:session_id - ~value:(Xapi_stdext_date.Date.of_float (Unix.time ())) + (* before updating the last_active field, check if the field has been + already updated recently. This avoids holding the database lock too often.*) + let n = Xapi_stdext_date.Date.now () in + let last_active = + Db_actions.DB_Action.Session.get_last_active ~__context + ~self:session_id + in + let ptime_now = Xapi_stdext_date.Date.to_ptime n in + let refresh_threshold = + let last_active_ptime = + Xapi_stdext_date.Date.to_ptime last_active + in + match + Ptime.add_span last_active_ptime (Ptime.Span.of_int_s 600) + with + | None -> + let err_msg = + "Can't add the configurable threshold of last active to \ + the current time." + in + raise Api_errors.(Server_error (internal_error, [err_msg])) + | Some ptime -> + ptime + in + if Ptime.is_later ptime_now ~than:refresh_threshold then + Db_actions.DB_Action.Session.set_last_active ~__context + ~self:session_id ~value:n with | Db_exn.DBCache_NotFound (_, _, reference) -> info From 67df15711c502a7f933fce7d46d1139b22779f87 Mon Sep 17 00:00:00 2001 From: Gabriel Buica Date: Tue, 3 Sep 2024 14:37:03 +0100 Subject: [PATCH 308/341] CP-51352: Configurable threshold for updating `last_active` Follow up to the previous commit, this makes the hardcoded threshold of 10 mins configurable from `xapi.conf`. 
This enables flexibilty in case different values are needed for updating the `last_active` field of a session more or less often. Signed-off-by: Gabriel Buica --- ocaml/xapi/session_check.ml | 3 ++- ocaml/xapi/xapi_globs.ml | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/session_check.ml b/ocaml/xapi/session_check.ml index 63ac8fe6e95..d30dbb6d4e3 100644 --- a/ocaml/xapi/session_check.ml +++ b/ocaml/xapi/session_check.ml @@ -66,7 +66,8 @@ let check ~intra_pool_only ~session_id ~action = Xapi_stdext_date.Date.to_ptime last_active in match - Ptime.add_span last_active_ptime (Ptime.Span.of_int_s 600) + Ptime.add_span last_active_ptime + !Xapi_globs.threshold_last_active with | None -> let err_msg = diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 37c62e04e9f..56fbce47edd 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -712,6 +712,10 @@ let host_assumed_dead_interval = ref Mtime.Span.(10 * min) (* If a session has a last_active older than this we delete it *) let inactive_session_timeout = ref 86400. (* 24 hrs in seconds *) +(* If a session was refreshed more recently than threshold_last_active do not refresh it again. *) +let threshold_last_active = ref (Ptime.Span.of_int_s 600) +(* 10 min in seconds *) + let pending_task_timeout = ref 86400. (* 24 hrs in seconds *) let completed_task_timeout = ref 3900. (* 65 mins *) @@ -1631,6 +1635,11 @@ let other_options = , (fun () -> string_of_int !external_authentication_cache_size) , "Specify the maximum capacity of the external authentication cache" ) + ; ( "threshold_last_active" + , Arg.Int (fun t -> threshold_last_active := Ptime.Span.of_int_s t) + , (fun () -> Format.asprintf "%a" Ptime.Span.pp !threshold_last_active) + , "Specify the threshold below which we do not refresh the session" + ) ] (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi. 
From c42ba54f863ef1fa59da9820fb4044a9a5ec90ca Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Thu, 25 Apr 2024 10:21:06 +0100 Subject: [PATCH 309/341] Add Java SDK to SDK actions Signed-off-by: Danilo Del Busso --- .github/workflows/generate-and-build-sdks.yml | 39 +++++++++++++++++++ .github/workflows/release.yml | 6 +++ 2 files changed, 45 insertions(+) diff --git a/.github/workflows/generate-and-build-sdks.yml b/.github/workflows/generate-and-build-sdks.yml index 87da4b1d8f5..14ed609e7cd 100644 --- a/.github/workflows/generate-and-build-sdks.yml +++ b/.github/workflows/generate-and-build-sdks.yml @@ -55,6 +55,12 @@ jobs: _build/install/default/xapi/sdk/go/* !_build/install/default/xapi/sdk/go/dune + - name: Store Java SDK source + uses: actions/upload-artifact@v4 + with: + name: SDK_Source_Java + path: _build/install/default/xapi/sdk/java/* + - name: Trim dune cache run: opam exec -- dune cache trim --size=2GiB @@ -84,6 +90,39 @@ jobs: source/* !source/src/*.o + build-java-sdk: + name: Build Java SDK + runs-on: ubuntu-latest + needs: generate-sdk-sources + steps: + - name: Install dependencies + run: sudo apt-get install maven + + - name: Retrieve Java SDK source + uses: actions/download-artifact@v4 + with: + name: SDK_Source_Java + path: source/ + + - name: Set up JDK 17 + uses: actions/setup-java@v4 + with: + java-version: '17' + distribution: 'temurin' + + - name: Build Java SDK + shell: bash + run: | + xapi_version="${{ inputs.xapi_version }}" + xapi_version="${xapi_version//v/}" + mkdir -p target && mvn -f source/xen-api/pom.xml -B -Drevision=$xapi_version-prerelease clean package && mv source/xen-api/target/*.jar target/ + + - name: Store Java SDK + uses: actions/upload-artifact@v4 + with: + name: SDK_Artifacts_Java + path: target/* + build-csharp-sdk: name: Build C# SDK runs-on: windows-2022 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9a051ef15f9..8ed3c26d2af 100644 --- a/.github/workflows/release.yml +++ 
b/.github/workflows/release.yml @@ -64,6 +64,12 @@ jobs: name: SDK_Artifacts_C path: libxenserver/usr/local/ + - name: Retrieve Java SDK distribution artifacts + uses: actions/download-artifact@v4 + with: + name: SDK_Artifacts_Java + path: dist/ + - name: Retrieve C# SDK distribution artifacts uses: actions/download-artifact@v4 with: From 70680834e2c116d00a134f3ed000fed74a8680f2 Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Thu, 9 May 2024 09:54:45 +0100 Subject: [PATCH 310/341] Move `BadAsyncResult` at the bottom of exception list Also update wording in `parseResult` Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/templates/Types.mustache | 27 +++++++++++---------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/ocaml/sdk-gen/java/templates/Types.mustache b/ocaml/sdk-gen/java/templates/Types.mustache index 9efae5e0d0c..1af54721669 100644 --- a/ocaml/sdk-gen/java/templates/Types.mustache +++ b/ocaml/sdk-gen/java/templates/Types.mustache @@ -174,6 +174,19 @@ public class Types } {{/errors}} + public static class BadAsyncResult extends XenAPIException { + public final String result; + + /** + * Create a new BadAsyncResult + */ + public BadAsyncResult(String result) + { + super(result); + this.result = result; + } + } + {{#types}} /** * Converts an {@link Object} to a {@link {{{name}}}} object. 
@@ -215,24 +228,12 @@ public class Types {{/generate_reference_task_result_func}} {{/types}} - - public static class BadAsyncResult extends XenAPIException - { - public final String result; - - public BadAsyncResult(String result) - { - super(result); - this.result = result; - } - } - private static String parseResult(String result) throws BadAsyncResult { Pattern pattern = Pattern.compile("(.*)"); Matcher matcher = pattern.matcher(result); if (!matcher.find() || matcher.groupCount() != 1) { - throw new Types.BadAsyncResult("Can't interpret: " + result); + throw new Types.BadAsyncResult("Can't parse: " + result); } return matcher.group(1); From b1f6341b30cc832ada79234b94d052a254207a57 Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Mon, 5 Aug 2024 14:25:11 +0100 Subject: [PATCH 311/341] Apply changes added in eb34314f0c946fe2ae6dc63cb8079f2b473c8c90 After the rebase the change regressed Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/templates/Types.mustache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/sdk-gen/java/templates/Types.mustache b/ocaml/sdk-gen/java/templates/Types.mustache index 1af54721669..6c56406301a 100644 --- a/ocaml/sdk-gen/java/templates/Types.mustache +++ b/ocaml/sdk-gen/java/templates/Types.mustache @@ -147,7 +147,7 @@ public class Types if (this == UNRECOGNIZED) return "UNRECOGNIZED";{{#values}} if (this == {{{name_uppercase}}}) return "{{{name}}}";{{/values}} /* This can never be reached */ - return "illegal enum"; + return "UNRECOGNIZED"; } } From 9466a7e0506df654221ede150c0521252978c46e Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Tue, 10 Sep 2024 09:56:13 +0100 Subject: [PATCH 312/341] Fix spacing in Types.mustache Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/java/templates/Types.mustache | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ocaml/sdk-gen/java/templates/Types.mustache b/ocaml/sdk-gen/java/templates/Types.mustache index 6c56406301a..4da97c774cd 100644 --- 
a/ocaml/sdk-gen/java/templates/Types.mustache +++ b/ocaml/sdk-gen/java/templates/Types.mustache @@ -101,6 +101,7 @@ public class Types super(String.valueOf(responseError)); } } + /** * Checks the provided server response was successful. If the call * failed, throws a XenAPIException. If the server @@ -149,7 +150,6 @@ public class Types /* This can never be reached */ return "UNRECOGNIZED"; } - } {{/enums}} @@ -172,8 +172,8 @@ public class Types {{/err_params}} } } - {{/errors}} + {{/errors}} public static class BadAsyncResult extends XenAPIException { public final String result; From ce3bf5ba1571e4259a36a72641c6cabb9a95e46d Mon Sep 17 00:00:00 2001 From: Danilo Del Busso Date: Fri, 13 Sep 2024 14:28:52 +0100 Subject: [PATCH 313/341] Fix syntax in CustomDateDeserializer.java Regression introduced by a incorrect merge conflict resolution after a rebase on master Signed-off-by: Danilo Del Busso --- .../java/com/xensource/xenapi/CustomDateDeserializer.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java index e397ba7e27f..3ba135e0a40 100644 --- a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java +++ b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/CustomDateDeserializer.java @@ -151,8 +151,6 @@ public CustomDateDeserializer(Class t) { } } - private static - /** * Deserializes a {@link Date} object from the given JSON parser. 
* @@ -162,8 +160,8 @@ public CustomDateDeserializer(Class t) { * @return The deserialized {@link Date} object * @throws IOException if an I/O error occurs during deserialization */ - @Override public Date deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) - throws IOException { + @Override + public Date deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException { var text = jsonParser.getText(); for (SimpleDateFormat formatter : dateFormatsUtc) { try { From 347f6e38e2c59fe1bc035aa4a562bf3b64a744c3 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 11 Jul 2024 11:39:38 +0100 Subject: [PATCH 314/341] xapi-stdext-date: Mark deprecated bindings as such And replace its users Signed-off-by: Pau Ruiz Safont --- doc/content/xapi/storage/sxm.md | 4 +-- ocaml/alerts/expiry_alert.ml | 4 +-- ocaml/idl/datamodel.ml | 17 +++++----- ocaml/idl/datamodel_certificate.ml | 4 +-- ocaml/idl/datamodel_host.ml | 3 +- ocaml/idl/datamodel_types.ml | 8 ++--- ocaml/idl/datamodel_types.mli | 6 ++-- ocaml/idl/datamodel_values.ml | 6 ++-- ocaml/idl/datamodel_vm.ml | 2 +- ocaml/idl/json_backend/gen_json.ml | 2 +- ocaml/idl/ocaml_backend/gen_api.ml | 6 ++-- ocaml/idl/ocaml_backend/gen_db_actions.ml | 4 +-- ocaml/idl/ocaml_backend/gen_test.ml | 2 +- ocaml/idl/ocaml_backend/ocaml_utils.ml | 2 +- ocaml/libs/http-lib/xMLRPC.ml | 4 +-- ocaml/libs/http-lib/xMLRPC.mli | 4 +-- .../xapi-stdext/lib/xapi-stdext-date/date.mli | 12 +++++-- ocaml/license/daily_license_check.ml | 4 +-- ocaml/quicktest/qt.ml | 6 ++-- ocaml/quicktest/qt.mli | 2 +- ocaml/quicktest/quicktest_date.ml | 6 ++-- ocaml/quicktest/quicktest_event.ml | 2 +- ocaml/quicktest/quicktest_vdi.ml | 4 +-- ocaml/sdk-gen/csharp/gen_csharp_binding.ml | 2 +- .../alerts/test_alert_certificate_check.ml | 2 +- .../tests/alerts/test_daily_license_check.ml | 2 +- ocaml/tests/alerts/test_expiry_alert.ml | 2 +- ocaml/tests/common/test_common.ml | 8 ++--- 
ocaml/tests/test_client.ml | 2 +- ocaml/tests/test_pool_license.ml | 26 +++++++------- ocaml/tests/test_session.ml | 4 +-- ocaml/tests/test_updateinfo.ml | 22 ++++++------ ocaml/tests/test_xapi_db_upgrade.ml | 6 ++-- ocaml/xapi-cli-server/cli_operations.ml | 12 +++---- ocaml/xapi-cli-server/records.ml | 34 ++++++++++--------- ocaml/xapi-idl/storage/storage_interface.ml | 3 +- ocaml/xapi-storage-script/main.ml | 4 +-- ocaml/xapi/certificates.ml | 2 +- ocaml/xapi/create_misc.ml | 6 ++-- ocaml/xapi/db_gc_util.ml | 14 ++++---- ocaml/xapi/dbsync.ml | 2 +- ocaml/xapi/dbsync_master.ml | 9 +++-- ocaml/xapi/dbsync_slave.ml | 10 +++--- ocaml/xapi/debug_populate.ml | 2 +- ocaml/xapi/export.ml | 2 +- ocaml/xapi/license_check.ml | 4 +-- ocaml/xapi/license_check.mli | 4 +-- ocaml/xapi/message_forwarding.ml | 2 +- ocaml/xapi/monitor_master.ml | 7 ++-- ocaml/xapi/pool_periodic_update_sync.ml | 2 +- ocaml/xapi/storage_smapiv1.ml | 6 ++-- ocaml/xapi/storage_smapiv1_wrapper.ml | 2 +- ocaml/xapi/taskHelper.ml | 2 +- ocaml/xapi/updateinfo.ml | 4 +-- ocaml/xapi/vpx.ml | 6 ++-- ocaml/xapi/xapi_blob.ml | 4 +-- ocaml/xapi/xapi_guest_agent.ml | 4 +-- ocaml/xapi/xapi_ha.ml | 2 +- ocaml/xapi/xapi_host.ml | 7 ++-- ocaml/xapi/xapi_host.mli | 5 ++- ocaml/xapi/xapi_host_crashdump.ml | 2 +- ocaml/xapi/xapi_local_session.ml | 14 ++------ ocaml/xapi/xapi_local_session.mli | 6 +--- ocaml/xapi/xapi_message.ml | 23 +++++++------ ocaml/xapi/xapi_pif.ml | 2 +- ocaml/xapi/xapi_pool.ml | 8 ++--- ocaml/xapi/xapi_pool.mli | 2 +- ocaml/xapi/xapi_pool_license.ml | 4 +-- ocaml/xapi/xapi_pool_patch.ml | 2 +- ocaml/xapi/xapi_session.ml | 30 ++++++++-------- ocaml/xapi/xapi_session.mli | 2 +- ocaml/xapi/xapi_sr.ml | 6 ++-- ocaml/xapi/xapi_vbd.ml | 2 +- ocaml/xapi/xapi_vbd_helpers.ml | 3 +- ocaml/xapi/xapi_vdi.mli | 6 ++-- ocaml/xapi/xapi_vif_helpers.ml | 3 +- ocaml/xapi/xapi_vm.ml | 6 ++-- ocaml/xapi/xapi_vm_clone.ml | 4 +-- ocaml/xapi/xapi_vm_helpers.ml | 10 +++--- ocaml/xapi/xapi_vm_migrate.ml | 4 +-- 
ocaml/xapi/xapi_vmss.ml | 2 +- ocaml/xapi/xapi_xenops.ml | 10 +++--- ocaml/xenopsd/lib/suspend_image.ml | 2 +- ocaml/xenopsd/lib/xenops_server.ml | 2 +- ocaml/xenopsd/xc/memory_breakdown.ml | 2 +- ocaml/xenopsd/xc/memory_summary.ml | 2 +- 86 files changed, 247 insertions(+), 263 deletions(-) diff --git a/doc/content/xapi/storage/sxm.md b/doc/content/xapi/storage/sxm.md index 8429f87321c..ee3b90276cc 100644 --- a/doc/content/xapi/storage/sxm.md +++ b/doc/content/xapi/storage/sxm.md @@ -450,8 +450,8 @@ but we've still got a bit of thinking to do: we sort the VDIs to copy based on a let compare_fun v1 v2 = let r = Int64.compare v1.size v2.size in if r = 0 then - let t1 = Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) in - let t2 = Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) in + let t1 = Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) in + let t2 = Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) in compare t1 t2 else r in let all_vdis = all_vdis |> List.sort compare_fun in diff --git a/ocaml/alerts/expiry_alert.ml b/ocaml/alerts/expiry_alert.ml index 1ea19844ba6..d2667ff35df 100644 --- a/ocaml/alerts/expiry_alert.ml +++ b/ocaml/alerts/expiry_alert.ml @@ -50,7 +50,7 @@ let all_messages rpc session_id = let message_body msg expiry = Printf.sprintf "%s%s" msg - (Date.to_string expiry) + (Date.to_rfc3339 expiry) let expired_message obj = Printf.sprintf "%s has expired." obj @@ -58,7 +58,7 @@ let expiring_message obj = Printf.sprintf "%s is expiring soon." 
obj let maybe_generate_alert now obj_description alert_conditions expiry = let remaining_days = - days_until_expiry (Date.to_float now) (Date.to_float expiry) + days_until_expiry (Date.to_unix_time now) (Date.to_unix_time expiry) in alert_conditions |> List.sort (fun (a, _) (b, _) -> compare a b) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 2efca2c2ce8..24b2e597057 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -217,9 +217,8 @@ module Session = struct session instance has is_local_superuser set, then the value of \ this field is undefined." ; field ~in_product_since:rel_george ~qualifier:DynamicRO - ~default_value:(Some (VDateTime (Date.of_float 0.))) - ~ty:DateTime "validation_time" - "time when session was last validated" + ~default_value:(Some (VDateTime Date.epoch)) ~ty:DateTime + "validation_time" "time when session was last validated" ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String "auth_user_sid" "the subject identifier of the user that was externally \ @@ -3897,7 +3896,7 @@ module VDI = struct ; param_name= "snapshot_time" ; param_doc= "Storage-specific config" ; param_release= tampa_release - ; param_default= Some (VDateTime Date.never) + ; param_default= Some (VDateTime Date.epoch) } ; { param_type= Ref _vdi @@ -4468,7 +4467,7 @@ module VDI = struct ~ty:(Set (Ref _vdi)) ~doc_tags:[Snapshots] "snapshots" "List pointing to all the VDIs snapshots." ; field ~in_product_since:rel_orlando - ~default_value:(Some (VDateTime Date.never)) ~qualifier:DynamicRO + ~default_value:(Some (VDateTime Date.epoch)) ~qualifier:DynamicRO ~ty:DateTime ~doc_tags:[Snapshots] "snapshot_time" "Date/time when this snapshot was created." 
; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando @@ -4752,7 +4751,7 @@ module VBD_metrics = struct uid _vbd_metrics ; namespace ~name:"io" ~contents:iobandwidth () ; field ~qualifier:DynamicRO ~ty:DateTime - ~default_value:(Some (VDateTime Date.never)) + ~default_value:(Some (VDateTime Date.epoch)) ~lifecycle: [ (Published, rel_rio, "") @@ -5669,7 +5668,7 @@ module VMPP = struct "true if this protection policy's backup is running" ; field ~lifecycle:removed ~qualifier:DynamicRO ~ty:DateTime "backup_last_run_time" "time of the last backup" - ~default_value:(Some (VDateTime (Date.of_float 0.))) + ~default_value:(Some (VDateTime Date.epoch)) ; field ~lifecycle:removed ~qualifier:StaticRO ~ty:archive_target_type "archive_target_type" "type of the archive target config" ~default_value:(Some (VEnum "none")) @@ -5693,7 +5692,7 @@ module VMPP = struct "true if this protection policy's archive is running" ; field ~lifecycle:removed ~qualifier:DynamicRO ~ty:DateTime "archive_last_run_time" "time of the last archive" - ~default_value:(Some (VDateTime (Date.of_float 0.))) + ~default_value:(Some (VDateTime Date.epoch)) ; field ~lifecycle:removed ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" "all VMs attached to this protection policy" ; field ~lifecycle:removed ~qualifier:StaticRO ~ty:Bool @@ -5856,7 +5855,7 @@ module VMSS = struct ~default_value:(Some (VMap [])) ; field ~qualifier:DynamicRO ~ty:DateTime "last_run_time" "time of the last snapshot" - ~default_value:(Some (VDateTime (Date.of_float 0.))) + ~default_value:(Some (VDateTime Date.epoch)) ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" "all VMs attached to this snapshot schedule" ] diff --git a/ocaml/idl/datamodel_certificate.ml b/ocaml/idl/datamodel_certificate.ml index bfbdd2b60b5..53c594fb941 100644 --- a/ocaml/idl/datamodel_certificate.ml +++ b/ocaml/idl/datamodel_certificate.ml @@ -59,10 +59,10 @@ let t = ~default_value:(Some (VRef null_ref)) "The host where the certificate is installed" ; 
field ~qualifier:StaticRO ~lifecycle ~ty:DateTime "not_before" - ~default_value:(Some (VDateTime Date.never)) + ~default_value:(Some (VDateTime Date.epoch)) "Date after which the certificate is valid" ; field ~qualifier:StaticRO ~lifecycle ~ty:DateTime "not_after" - ~default_value:(Some (VDateTime Date.never)) + ~default_value:(Some (VDateTime Date.epoch)) "Date before which the certificate is valid" ; field ~qualifier:StaticRO ~lifecycle: diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index b7d34350819..08bef5570c7 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -2188,8 +2188,7 @@ let t = "tls_verification_enabled" ~default_value:(Some (VBool false)) "True if this host has TLS verifcation enabled" ; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:DateTime - "last_software_update" - ~default_value:(Some (VDateTime (Date.of_float 0.0))) + "last_software_update" ~default_value:(Some (VDateTime Date.epoch)) "Date and time when the last software update was applied" ; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:Bool ~default_value:(Some (VBool false)) "https_only" diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index 61893c99e01..67a6fdd4ea1 100644 --- a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -26,12 +26,12 @@ *) module Date = struct - open Xapi_stdext_date + module Date = Xapi_stdext_date.Date include Date - let iso8601_of_rpc rpc = Date.of_string (Rpc.string_of_rpc rpc) + let t_of_rpc rpc = Date.of_iso8601 (Rpc.string_of_rpc rpc) - let rpc_of_iso8601 date = Rpc.rpc_of_string (Date.to_string date) + let rpc_of_t date = Rpc.rpc_of_string (Date.to_rfc3339 date) end (* useful constants for product vsn tracking *) @@ -418,7 +418,7 @@ type api_value = | VInt of int64 | VFloat of float | VBool of bool - | VDateTime of Date.iso8601 + | VDateTime of Date.t | VEnum of string | VMap of (api_value * api_value) list | VSet of api_value list diff --git 
a/ocaml/idl/datamodel_types.mli b/ocaml/idl/datamodel_types.mli index 76ac814eb49..fbfb9e4a6f6 100644 --- a/ocaml/idl/datamodel_types.mli +++ b/ocaml/idl/datamodel_types.mli @@ -1,9 +1,9 @@ module Date : sig include module type of Xapi_stdext_date.Date - val iso8601_of_rpc : Rpc.t -> Xapi_stdext_date.Date.iso8601 + val t_of_rpc : Rpc.t -> Xapi_stdext_date.Date.t - val rpc_of_iso8601 : Xapi_stdext_date.Date.iso8601 -> Rpc.t + val rpc_of_t : Xapi_stdext_date.Date.t -> Rpc.t end val oss_since_303 : string option @@ -115,7 +115,7 @@ type api_value = | VInt of int64 | VFloat of float | VBool of bool - | VDateTime of Date.iso8601 + | VDateTime of Date.t | VEnum of string | VMap of (api_value * api_value) list | VSet of api_value list diff --git a/ocaml/idl/datamodel_values.ml b/ocaml/idl/datamodel_values.ml index 1b463d4b2e7..e270899b50f 100644 --- a/ocaml/idl/datamodel_values.ml +++ b/ocaml/idl/datamodel_values.ml @@ -40,7 +40,7 @@ let rec to_rpc v = | VBool b -> Rpc.Bool b | VDateTime d -> - Rpc.String (Date.to_string d) + Rpc.String (Date.to_rfc3339 d) | VEnum e -> Rpc.String e | VMap vvl -> @@ -94,7 +94,7 @@ let to_db v = | VBool false -> String "false" | VDateTime d -> - String (Date.to_string d) + String (Date.to_rfc3339 d) | VEnum e -> String e | VMap vvl -> @@ -117,7 +117,7 @@ let gen_empty_db_val t = | Bool -> Value.String "false" | DateTime -> - Value.String (Date.to_string Date.never) + Value.String Date.(to_rfc3339 epoch) | Enum (_, (enum_value, _) :: _) -> Value.String enum_value | Enum (_, []) -> diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index bf6fe168f8a..28d77f8ae67 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -2072,7 +2072,7 @@ let t = "List pointing to all the VM snapshots." 
; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando - ~default_value:(Some (VDateTime Date.never)) ~ty:DateTime + ~default_value:(Some (VDateTime Date.epoch)) ~ty:DateTime "snapshot_time" "Date/time when this snapshot was created." ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VString "")) diff --git a/ocaml/idl/json_backend/gen_json.ml b/ocaml/idl/json_backend/gen_json.ml index 446eeb04b8f..5c8fc0da0ff 100644 --- a/ocaml/idl/json_backend/gen_json.ml +++ b/ocaml/idl/json_backend/gen_json.ml @@ -89,7 +89,7 @@ end = struct | VBool x -> string_of_bool x | VDateTime x -> - Date.to_string x + Date.to_rfc3339 x | VEnum x -> x | VMap x -> diff --git a/ocaml/idl/ocaml_backend/gen_api.ml b/ocaml/idl/ocaml_backend/gen_api.ml index 90fb78d39dc..5b18d603f4e 100644 --- a/ocaml/idl/ocaml_backend/gen_api.ml +++ b/ocaml/idl/ocaml_backend/gen_api.ml @@ -413,9 +413,9 @@ let gen_client_types highapi = "module Date = struct" ; " open Xapi_stdext_date" ; " include Date" - ; " let rpc_of_iso8601 x = DateTime (Date.to_string x)" - ; " let iso8601_of_rpc = function String x | DateTime x -> \ - Date.of_string x | _ -> failwith \"Date.iso8601_of_rpc\"" + ; " let rpc_of_t x = DateTime (Date.to_rfc3339 x)" + ; " let t_of_rpc = function String x | DateTime x -> Date.of_iso8601 \ + x | _ -> failwith \"Date.t_of_rpc\"" ; "end" ] ; [ diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index 23c3dc8a747..44542173fe9 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -69,7 +69,7 @@ let dm_to_string tys : O.Module.t = | DT.Bool -> "string_of_bool" | DT.DateTime -> - "Date.to_string" + "Date.to_rfc3339" | DT.Enum (_name, cs) -> let aux (c, _) = Printf.sprintf {|| %s -> "%s"|} (OU.constructor_of c) c @@ -119,7 +119,7 @@ let string_to_dm tys : O.Module.t = | DT.Bool -> "bool_of_string" | 
DT.DateTime -> - "fun x -> Date.of_string x" + "fun x -> Date.of_iso8601 x" | DT.Enum (name, cs) -> let aux (c, _) = "\"" ^ c ^ "\" -> " ^ OU.constructor_of c in "fun v -> match v with\n " diff --git a/ocaml/idl/ocaml_backend/gen_test.ml b/ocaml/idl/ocaml_backend/gen_test.ml index d9824961db0..abf251014f0 100644 --- a/ocaml/idl/ocaml_backend/gen_test.ml +++ b/ocaml/idl/ocaml_backend/gen_test.ml @@ -32,7 +32,7 @@ let rec gen_test_type highapi ty = | DT.Bool -> "true" | DT.DateTime -> - "(Date.of_string \"20120101T00:00:00Z\")" + "(Date.of_iso8601 \"20120101T00:00:00Z\")" | DT.Enum (_, (x, _) :: _) -> Printf.sprintf "(%s)" (OU.constructor_of x) | DT.Set (DT.Enum (_, y)) -> diff --git a/ocaml/idl/ocaml_backend/ocaml_utils.ml b/ocaml/idl/ocaml_backend/ocaml_utils.ml index 3a6436c67f4..7fe7fe063bc 100644 --- a/ocaml/idl/ocaml_backend/ocaml_utils.ml +++ b/ocaml/idl/ocaml_backend/ocaml_utils.ml @@ -128,7 +128,7 @@ let rec ocaml_of_ty = function | Bool -> "bool" | DateTime -> - "Date.iso8601" + "Date.t" | Set (Record x) -> alias_of_ty (Record x) ^ " list" | Set x -> diff --git a/ocaml/libs/http-lib/xMLRPC.ml b/ocaml/libs/http-lib/xMLRPC.ml index a918f9b4e13..e9e425b976f 100644 --- a/ocaml/libs/http-lib/xMLRPC.ml +++ b/ocaml/libs/http-lib/xMLRPC.ml @@ -68,7 +68,7 @@ module To = struct let boolean b = value (box "boolean" [pcdata (if b then "1" else "0")]) let datetime s = - value (box "dateTime.iso8601" [pcdata (Xapi_stdext_date.Date.to_string s)]) + value (box "dateTime.iso8601" [pcdata (Xapi_stdext_date.Date.to_rfc3339 s)]) let double x = let txt = @@ -197,7 +197,7 @@ module From = struct let boolean = value (singleton ["boolean"] (( <> ) (Xml.PCData "0"))) let datetime x = - Xapi_stdext_date.Date.of_string + Xapi_stdext_date.Date.of_iso8601 (value (singleton ["dateTime.iso8601"] (pcdata id)) x) let double = value (singleton ["double"] (pcdata float_of_string)) diff --git a/ocaml/libs/http-lib/xMLRPC.mli b/ocaml/libs/http-lib/xMLRPC.mli index c8a7ca32af6..165f2e6ec52 
100644 --- a/ocaml/libs/http-lib/xMLRPC.mli +++ b/ocaml/libs/http-lib/xMLRPC.mli @@ -59,7 +59,7 @@ module To : sig val boolean : bool -> xmlrpc (** Marshal a boolean. *) - val datetime : Xapi_stdext_date.Date.iso8601 -> xmlrpc + val datetime : Xapi_stdext_date.Date.t -> xmlrpc (** Marshal a date-time. *) val double : float -> xmlrpc @@ -98,7 +98,7 @@ module From : sig val boolean : xmlrpc -> bool (** Parse a boolean. *) - val datetime : xmlrpc -> Xapi_stdext_date.Date.iso8601 + val datetime : xmlrpc -> Xapi_stdext_date.Date.t (** Parse a date-time. *) val double : xmlrpc -> float diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli index 62e894808bf..c34bcfd9e1c 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli @@ -78,28 +78,34 @@ val diff : t -> t -> Ptime.Span.t (** Deprecated bindings, these will be removed in a future release: *) val rfc822_to_string : t -> string +[@@deprecated "Use Date.to_rfc822"] (** Same as {!to_rfc822} *) val rfc822_of_float : float -> t +[@@deprecated "Use Date.of_unix_time"] (** Same as {!of_unix_time} *) val of_float : float -> t +[@@deprecated "Use Date.of_unix_time"] (** Same as {!of_unix_time} *) val to_float : t -> float +[@@deprecated "Use Date.to_unix_time"] (** Same as {!to_unix_time} *) val to_string : t -> string +[@@deprecated "Use Date.to_rfc3339"] (** Same as {!to_rfc3339} *) val of_string : string -> t +[@@deprecated "Use Date.of_iso8601"] (** Same as {!of_iso8601} *) -val never : t +val never : t [@@deprecated "Use Date.epoch"] (** Same as {!epoch} *) (** Deprecated alias for {!t} *) -type iso8601 = t +type iso8601 = t [@@deprecated "Use Date.t"] (** Deprecated alias for {!t} *) -type rfc822 = t +type rfc822 = t [@@deprecated "Use Date.t"] diff --git a/ocaml/license/daily_license_check.ml b/ocaml/license/daily_license_check.ml index b107bfef755..3b6edecbb3e 100644 --- 
a/ocaml/license/daily_license_check.ml +++ b/ocaml/license/daily_license_check.ml @@ -13,7 +13,7 @@ let get_hosts all_license_params threshold = List.fold_left (fun acc (name_label, license_params) -> let expiry = List.assoc "expiry" license_params in - let expiry = Xapi_stdext_date.Date.(to_float (of_string expiry)) in + let expiry = Xapi_stdext_date.Date.(to_unix_time (of_iso8601 expiry)) in if expiry < threshold then name_label :: acc else @@ -23,7 +23,7 @@ let get_hosts all_license_params threshold = let check_license now pool_license_state all_license_params = let expiry = List.assoc "expiry" pool_license_state in - let expiry = Xapi_stdext_date.Date.(to_float (of_string expiry)) in + let expiry = Xapi_stdext_date.Date.(to_unix_time (of_iso8601 expiry)) in let days = days_to_expiry now expiry in if days <= 0. then Expired (get_hosts all_license_params now) diff --git a/ocaml/quicktest/qt.ml b/ocaml/quicktest/qt.ml index 7485cef15d4..3b6b7cd9743 100644 --- a/ocaml/quicktest/qt.ml +++ b/ocaml/quicktest/qt.ml @@ -100,9 +100,9 @@ module Time = struct let now () = Unix.gettimeofday () - let of_field = Xapi_stdext_date.Date.to_float + let of_field = Xapi_stdext_date.Date.to_unix_time - let pp t = Xapi_stdext_date.Date.of_float t |> Xapi_stdext_date.Date.to_string + let pp t = Xapi_stdext_date.Date.(of_unix_time t |> to_rfc3339) let check t ~after ~before = Alcotest.(check bool) @@ -269,7 +269,7 @@ module VDI = struct ; ( `Same , "snapshot_time" , fun vdi -> - vdi.API.vDI_snapshot_time |> Xapi_stdext_date.Date.to_string + vdi.API.vDI_snapshot_time |> Xapi_stdext_date.Date.to_rfc3339 ) ; (`Same, "virtual_size", fun vdi -> vdi.API.vDI_location) ] diff --git a/ocaml/quicktest/qt.mli b/ocaml/quicktest/qt.mli index 15dbb785f28..5ba4e8c68a0 100644 --- a/ocaml/quicktest/qt.mli +++ b/ocaml/quicktest/qt.mli @@ -34,7 +34,7 @@ module Time : sig val now : unit -> t - val of_field : Xapi_stdext_date.Date.iso8601 -> t + val of_field : Xapi_stdext_date.Date.t -> t val pp : t -> 
string diff --git a/ocaml/quicktest/quicktest_date.ml b/ocaml/quicktest/quicktest_date.ml index 7a7e6b7ba5e..35aec799cd4 100644 --- a/ocaml/quicktest/quicktest_date.ml +++ b/ocaml/quicktest/quicktest_date.ml @@ -3,9 +3,7 @@ module Date = Xapi_stdext_date.Date let test_host_get_server_localtime rpc session_id () = let host = Client.Host.get_by_uuid ~rpc ~session_id ~uuid:Qt.localhost_uuid in - let (_ : Date.iso8601) = - Client.Host.get_server_localtime ~rpc ~session_id ~host - in + let (_ : Date.t) = Client.Host.get_server_localtime ~rpc ~session_id ~host in () let test_message_get_since rpc session_id () = @@ -14,7 +12,7 @@ let test_message_get_since rpc session_id () = Forkhelpers.execute_command_get_output "/bin/date" [Printf.sprintf "+%s" format'; "-d"; "yesterday"] in - let yesterday = String.trim stdout |> Date.of_string in + let yesterday = String.trim stdout |> Date.of_iso8601 in let (_ : ('a API.Ref.t * API.message_t) list) = Client.Message.get_since ~rpc ~session_id ~since:yesterday in diff --git a/ocaml/quicktest/quicktest_event.ml b/ocaml/quicktest/quicktest_event.ml index f844db3e72c..a99f71bf752 100644 --- a/ocaml/quicktest/quicktest_event.ml +++ b/ocaml/quicktest/quicktest_event.ml @@ -381,7 +381,7 @@ let event_message_test rpc session_id () = ) ; let messages = Client.Client.Message.get ~rpc ~session_id ~cls ~obj_uuid - ~since:Xapi_stdext_date.Date.never + ~since:Xapi_stdext_date.Date.epoch in let has_msg m = List.exists (fun (r, _) -> r = m) messages in Alcotest.(check bool) diff --git a/ocaml/quicktest/quicktest_vdi.ml b/ocaml/quicktest/quicktest_vdi.ml index a648495eced..a11e1b2ce05 100644 --- a/ocaml/quicktest/quicktest_vdi.ml +++ b/ocaml/quicktest/quicktest_vdi.ml @@ -139,7 +139,7 @@ let vdi_bad_introduce rpc session_id sr_info () = ~location:(Ref.string_of (Ref.make ())) ~xenstore_data:[] ~sm_config:[] ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null - ~is_a_snapshot:false 
~snapshot_time:Xapi_stdext_date.Date.never + ~is_a_snapshot:false ~snapshot_time:Xapi_stdext_date.Date.epoch ~snapshot_of:Ref.null in Alcotest.fail @@ -161,7 +161,7 @@ let vdi_bad_introduce rpc session_id sr_info () = ~other_config:[] ~location:vdir.API.vDI_location ~xenstore_data:[] ~sm_config:[] ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null - ~is_a_snapshot:false ~snapshot_time:Xapi_stdext_date.Date.never + ~is_a_snapshot:false ~snapshot_time:Xapi_stdext_date.Date.epoch ~snapshot_of:Ref.null in Alcotest.fail diff --git a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml index ff390468130..edaa3a7c7f9 100644 --- a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml +++ b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml @@ -1206,7 +1206,7 @@ and get_default_value_opt field = Printf.sprintf "DateTime.ParseExact(\"%s\", \"yyyyMMddTHH:mm:ssZ\", \ CultureInfo.InvariantCulture)" - (Date.to_string y) + (Date.to_rfc3339 y) ] | VEnum y -> [enum_of_wire y] diff --git a/ocaml/tests/alerts/test_alert_certificate_check.ml b/ocaml/tests/alerts/test_alert_certificate_check.ml index b35dcd362a5..ce84cc672d2 100644 --- a/ocaml/tests/alerts/test_alert_certificate_check.ml +++ b/ocaml/tests/alerts/test_alert_certificate_check.ml @@ -14,7 +14,7 @@ open Certificate_check -let date_of = Xapi_stdext_date.Date.of_string +let date_of = Xapi_stdext_date.Date.of_iso8601 let check_time = date_of "20200201T02:00:00Z" diff --git a/ocaml/tests/alerts/test_daily_license_check.ml b/ocaml/tests/alerts/test_daily_license_check.ml index 866830b1059..067d93288ce 100644 --- a/ocaml/tests/alerts/test_daily_license_check.ml +++ b/ocaml/tests/alerts/test_daily_license_check.ml @@ -37,7 +37,7 @@ let expiry = Alcotest.testable pp_expiry equals let check_time = - Xapi_stdext_date.Date.(to_float (of_string "20160601T04:00:00Z")) + Xapi_stdext_date.Date.(to_unix_time (of_iso8601 "20160601T04:00:00Z")) let test_expiry ((pool_license_state, 
all_license_params), expected) () = let result = check_license check_time pool_license_state all_license_params in diff --git a/ocaml/tests/alerts/test_expiry_alert.ml b/ocaml/tests/alerts/test_expiry_alert.ml index ece31122b78..2c3fa283be8 100644 --- a/ocaml/tests/alerts/test_expiry_alert.ml +++ b/ocaml/tests/alerts/test_expiry_alert.ml @@ -14,7 +14,7 @@ open Expiry_alert -let date_of = Xapi_stdext_date.Date.of_string +let date_of = Xapi_stdext_date.Date.of_iso8601 let test_expired = ("TEST_EXPIRED", 1L) diff --git a/ocaml/tests/common/test_common.ml b/ocaml/tests/common/test_common.ml index 1c1685f693d..293317518a4 100644 --- a/ocaml/tests/common/test_common.ml +++ b/ocaml/tests/common/test_common.ml @@ -296,7 +296,7 @@ let make_pool ~__context ~master ?(name_label = "") ?(name_description = "") ?(repository_proxy_username = "") ?(repository_proxy_password = Ref.null) ?(migration_compression = false) ?(coordinator_bias = true) ?(telemetry_uuid = Ref.null) ?(telemetry_frequency = `weekly) - ?(telemetry_next_collection = API.Date.never) + ?(telemetry_next_collection = API.Date.epoch) ?(last_update_sync = API.Date.epoch) ?(update_sync_frequency = `daily) ?(update_sync_day = 0L) ?(update_sync_enabled = false) ?(recommendations = []) () = @@ -393,7 +393,7 @@ let make_vdi ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ()) ?(read_only = false) ?(other_config = []) ?(storage_lock = false) ?(location = "") ?(managed = false) ?(missing = false) ?(parent = Ref.null) ?(xenstore_data = []) ?(sm_config = []) ?(is_a_snapshot = false) - ?(snapshot_of = Ref.null) ?(snapshot_time = API.Date.never) ?(tags = []) + ?(snapshot_of = Ref.null) ?(snapshot_time = API.Date.epoch) ?(tags = []) ?(allow_caching = true) ?(on_boot = `persist) ?(metadata_of_pool = Ref.make ()) ?(metadata_latest = true) ?(is_tools_iso = false) ?(cbt_enabled = false) () = @@ -516,9 +516,9 @@ let make_pool_update ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ()) let make_session ~__context ?(ref = Ref.make 
()) ?(uuid = make_uuid ()) ?(this_host = Ref.null) ?(this_user = Ref.null) - ?(last_active = API.Date.never) ?(pool = false) ?(other_config = []) + ?(last_active = API.Date.epoch) ?(pool = false) ?(other_config = []) ?(is_local_superuser = false) ?(subject = Ref.null) - ?(validation_time = API.Date.never) ?(auth_user_sid = "") + ?(validation_time = API.Date.epoch) ?(auth_user_sid = "") ?(auth_user_name = "") ?(rbac_permissions = []) ?(parent = Ref.null) ?(originator = "test") ?(client_certificate = false) () = Db.Session.create ~__context ~ref ~uuid ~this_host ~this_user ~last_active diff --git a/ocaml/tests/test_client.ml b/ocaml/tests/test_client.ml index cdfa7690f79..1c3137721b8 100644 --- a/ocaml/tests/test_client.ml +++ b/ocaml/tests/test_client.ml @@ -13,7 +13,7 @@ let make_client_params ~__context = let rpc = Api_server.Server.dispatch_call req Unix.stdout in let session_id = let session_id = Ref.make () in - let now = Xapi_stdext_date.Date.of_float (Unix.time ()) in + let now = Xapi_stdext_date.Date.now () in let (_ : _ API.Ref.t) = Test_common.make_session ~__context ~ref:session_id ~this_host:(Helpers.get_localhost ~__context) diff --git a/ocaml/tests/test_pool_license.ml b/ocaml/tests/test_pool_license.ml index fbba9c74e36..aad9a145c11 100644 --- a/ocaml/tests/test_pool_license.ml +++ b/ocaml/tests/test_pool_license.ml @@ -29,22 +29,24 @@ let string_of_date_opt = function | None -> "None" | Some date -> - Printf.sprintf "Some %s" (Date.to_string date) + Printf.sprintf "Some %s" (Date.to_rfc3339 date) -let f2d = Date.of_float +let f2d = Date.of_unix_time -let f2d2s f = f |> Date.of_float |> Date.to_string +let f2d2s f = f |> Date.of_unix_time |> Date.to_rfc3339 let edition_to_int = [("edition1", 1); ("edition2", 2); ("edition3", 3)] module CompareDates = Generic.MakeStateless (struct module Io = struct - type input_t = Date.iso8601 option * Date.iso8601 option + type input_t = Date.t option * Date.t option type output_t = int let string_of_input_t = - 
Test_printers.(assoc_pair (option Date.to_string) (option Date.to_string)) + Test_printers.( + assoc_pair (option Date.to_rfc3339) (option Date.to_rfc3339) + ) let string_of_output_t = Test_printers.int end @@ -66,13 +68,13 @@ end) module PoolExpiryDate = Generic.MakeStateful (struct module Io = struct - type input_t = Date.iso8601 option list + type input_t = Date.t option list - type output_t = Date.iso8601 option + type output_t = Date.t option - let string_of_input_t = Test_printers.(list (option Date.to_string)) + let string_of_input_t = Test_printers.(list (option Date.to_rfc3339)) - let string_of_output_t = Test_printers.option Date.to_string + let string_of_output_t = Test_printers.option Date.to_rfc3339 end module State = Test_state.XapiDb @@ -86,7 +88,7 @@ module PoolExpiryDate = Generic.MakeStateful (struct | None -> [] | Some date -> - [("expiry", Date.to_string date)] + [("expiry", Date.to_rfc3339 date)] in let (_ : API.ref_host) = Test_common.make_host ~__context ~edition:"edition1" ~license_params @@ -201,10 +203,10 @@ module PoolLicenseState = Generic.MakeStateful (struct | None -> "never" | Some date -> - if date = Date.of_float License_check.never then + if date = Date.of_unix_time License_check.never then "never" else - Date.to_string date + Date.to_rfc3339 date in (pool_edition, pool_expiry) diff --git a/ocaml/tests/test_session.ml b/ocaml/tests/test_session.ml index 518dc221d72..4b441fc325b 100644 --- a/ocaml/tests/test_session.ml +++ b/ocaml/tests/test_session.ml @@ -1,8 +1,8 @@ module Date = Xapi_stdext_date.Date -let now = Date.of_string "2020-09-22T14:57:11Z" +let now = Date.of_iso8601 "2020-09-22T14:57:11Z" -let future = Date.of_string "2020-09-22T15:03:13Z" +let future = Date.of_iso8601 "2020-09-22T15:03:13Z" let fail_login ~__context ~uname ~originator ~now () = try diff --git a/ocaml/tests/test_updateinfo.ml b/ocaml/tests/test_updateinfo.ml index 6df2f0b2fba..6e05875e4f1 100644 --- a/ocaml/tests/test_updateinfo.ml +++ 
b/ocaml/tests/test_updateinfo.ml @@ -430,7 +430,7 @@ let fields_of_updateinfo = ) (list string) ; field "issued" - (fun (r : UpdateInfo.t) -> Xapi_stdext_date.Date.to_string r.issued) + (fun (r : UpdateInfo.t) -> Xapi_stdext_date.Date.to_rfc3339 r.issued) string ; field "severity" (fun (r : UpdateInfo.t) -> Severity.to_string r.severity) @@ -644,7 +644,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } @@ -690,7 +690,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } @@ -747,7 +747,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } @@ -771,7 +771,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:50Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:50Z" ; severity= Severity.None ; title= "title" } @@ -828,7 +828,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } @@ -852,7 +852,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:50Z" + 
Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:50Z" ; severity= Severity.None ; title= "title" } @@ -937,7 +937,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } @@ -1034,7 +1034,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct ; update_type= "security" ; livepatches= [] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } @@ -1112,7 +1112,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct } ] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } @@ -1190,7 +1190,7 @@ module UpdateInfoOfXml = Generic.MakeStateless (struct } ] ; issued= - Xapi_stdext_date.Date.of_string "2023-05-12T08:37:49Z" + Xapi_stdext_date.Date.of_iso8601 "2023-05-12T08:37:49Z" ; severity= Severity.High ; title= "title" } diff --git a/ocaml/tests/test_xapi_db_upgrade.ml b/ocaml/tests/test_xapi_db_upgrade.ml index 6b488fec157..f14838ef6ab 100644 --- a/ocaml/tests/test_xapi_db_upgrade.ml +++ b/ocaml/tests/test_xapi_db_upgrade.ml @@ -38,14 +38,14 @@ let update_snapshots () = let a = T.make_vm ~__context ~name_label:"a" () in let a_snap = T.make_vm ~__context ~name_label:"a snap" () in Db.VM.set_snapshot_of ~__context ~self:a_snap ~value:a ; - Db.VM.set_snapshot_time ~__context ~self:a_snap ~value:(Date.of_float 1.) ; + Db.VM.set_snapshot_time ~__context ~self:a_snap ~value:(Date.of_unix_time 1.) 
; let b = T.make_vm ~__context ~name_label:"b" () in let b_snap = T.make_vm ~__context ~name_label:"b snap" () in Db.VM.set_snapshot_of ~__context ~self:b_snap ~value:b ; - Db.VM.set_snapshot_time ~__context ~self:b_snap ~value:(Date.of_float 1.) ; + Db.VM.set_snapshot_time ~__context ~self:b_snap ~value:(Date.of_unix_time 1.) ; let b_snap2 = T.make_vm ~__context ~name_label:"b snap2" () in Db.VM.set_snapshot_of ~__context ~self:b_snap2 ~value:b ; - Db.VM.set_snapshot_time ~__context ~self:b_snap2 ~value:(Date.of_float 2.) ; + Db.VM.set_snapshot_time ~__context ~self:b_snap2 ~value:(Date.of_unix_time 2.) ; X.update_snapshots.fn ~__context ; let check_vm = Alcotest.check Alcotest_comparators.(ref ()) in (* a.parent = a_snap *) diff --git a/ocaml/xapi-cli-server/cli_operations.ml b/ocaml/xapi-cli-server/cli_operations.ml index 068309abaca..d0d981309da 100644 --- a/ocaml/xapi-cli-server/cli_operations.ml +++ b/ocaml/xapi-cli-server/cli_operations.ml @@ -1956,7 +1956,7 @@ let vdi_introduce printer rpc session_id params = let virtual_size = 0L and physical_utilisation = 0L in let metadata_of_pool = Ref.null in let is_a_snapshot = false in - let snapshot_time = Date.never in + let snapshot_time = Date.epoch in let snapshot_of = Ref.null in let vdi = Client.VDI.introduce ~rpc ~session_id ~uuid ~name_label ~name_description @@ -3210,7 +3210,7 @@ exception Multiple_failure of (string * string) list let format_message msg = Printf.sprintf "Message: time=%s priority=%Ld name='%s'" - (Date.to_string msg.API.message_timestamp) + (Date.to_rfc3339 msg.API.message_timestamp) msg.API.message_priority msg.API.message_name let wrap_op printer pri rpc session_id op e = @@ -3220,7 +3220,7 @@ let wrap_op printer pri rpc session_id op e = try Client.Message.get ~rpc ~session_id ~cls:`VM ~obj_uuid:(safe_get_field (field_lookup e.fields "uuid")) - ~since:(Date.of_float now) + ~since:(Date.of_unix_time now) with _ -> [] in List.iter @@ -5272,7 +5272,7 @@ let with_license_server_changes 
printer rpc session_id params hosts f = current_license_servers ; let alerts = Client.Message.get_since ~rpc ~session_id - ~since:(Date.of_float (now -. 1.)) + ~since:(Date.of_unix_time (now -. 1.)) in let print_if_checkout_error (ref, msg) = if @@ -6245,7 +6245,7 @@ let license_of_host rpc session_id host = let rstr = Features.of_assoc_list params in let expiry = if List.mem_assoc "expiry" params then - Date.to_float (Date.of_string (List.assoc "expiry" params)) + Date.to_unix_time (Date.of_iso8601 (List.assoc "expiry" params)) else 0. in @@ -6280,7 +6280,7 @@ let diagnostic_license_status printer rpc session_id _params = ; String.sub h.uuid 0 8 ; Features.to_compact_string h.rstr ; h.edition - ; Date.to_string (Date.of_float h.expiry) + ; Date.to_rfc3339 (Date.of_unix_time h.expiry) ; Printf.sprintf "%.1f" ((h.expiry -. now) /. (24. *. 60. *. 60.)) ] ) diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 1e7393d377f..3798280d082 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -376,7 +376,7 @@ let message_record rpc session_id message = ~get:(fun () -> (x ()).API.message_obj_uuid) () ; make_field ~name:"timestamp" - ~get:(fun () -> Date.to_string (x ()).API.message_timestamp) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.message_timestamp) () ; make_field ~name:"body" ~get:(fun () -> (x ()).API.message_body) () ] @@ -749,10 +749,10 @@ let task_record rpc session_id task = ; make_field ~name:"type" ~get:(fun () -> (x ()).API.task_type) () ; make_field ~name:"result" ~get:(fun () -> (x ()).API.task_result) () ; make_field ~name:"created" - ~get:(fun () -> Date.to_string (x ()).API.task_created) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.task_created) () ; make_field ~name:"finished" - ~get:(fun () -> Date.to_string (x ()).API.task_finished) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.task_finished) () ; make_field ~name:"error_info" ~get:(fun () -> concat_with_semi (x 
()).API.task_error_info) @@ -1447,11 +1447,11 @@ let pool_record rpc session_id pool = () ; make_field ~name:"telemetry-next-collection" ~get:(fun () -> - (x ()).API.pool_telemetry_next_collection |> Date.to_string + (x ()).API.pool_telemetry_next_collection |> Date.to_rfc3339 ) () ; make_field ~name:"last-update-sync" - ~get:(fun () -> Date.to_string (x ()).API.pool_last_update_sync) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.pool_last_update_sync) () ; make_field ~name:"update-sync-frequency" ~get:(fun () -> @@ -1550,7 +1550,7 @@ let vmss_record rpc session_id vmss = ) () ; make_field ~name:"last-run-time" - ~get:(fun () -> Date.to_string (x ()).API.vMSS_last_run_time) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.vMSS_last_run_time) () ; make_field ~name:"VMs" ~get:(fun () -> @@ -1842,7 +1842,7 @@ let vm_record rpc session_id vm = ~get:(fun () -> get_uuids_from_refs (x ()).API.vM_snapshots) () ; make_field ~name:"snapshot-time" - ~get:(fun () -> Date.to_string (x ()).API.vM_snapshot_time) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.vM_snapshot_time) () ; make_field ~name:"transportable-snapshot-id" ~hidden:true ~get:(fun () -> (x ()).API.vM_transportable_snapshot_id) @@ -2264,14 +2264,14 @@ let vm_record rpc session_id vm = ; make_field ~name:"start-time" ~get:(fun () -> Option.fold ~none:unknown_time - ~some:(fun m -> Date.to_string m.API.vM_metrics_start_time) + ~some:(fun m -> Date.to_rfc3339 m.API.vM_metrics_start_time) (xm ()) ) () ; make_field ~name:"install-time" ~get:(fun () -> Option.fold ~none:unknown_time - ~some:(fun m -> Date.to_string m.API.vM_metrics_install_time) + ~some:(fun m -> Date.to_rfc3339 m.API.vM_metrics_install_time) (xm ()) ) () @@ -2410,7 +2410,9 @@ let vm_record rpc session_id vm = ; make_field ~name:"guest-metrics-last-updated" ~get:(fun () -> Option.fold ~none:nid - ~some:(fun m -> Date.to_string m.API.vM_guest_metrics_last_updated) + ~some:(fun m -> + Date.to_rfc3339 m.API.vM_guest_metrics_last_updated + ) (xgm ()) ) () @@ 
-2611,7 +2613,7 @@ let host_crashdump_record rpc session_id host = ~get:(fun () -> get_uuid_from_ref (x ()).API.host_crashdump_host) () ; make_field ~name:"timestamp" - ~get:(fun () -> Date.to_string (x ()).API.host_crashdump_timestamp) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.host_crashdump_timestamp) () ; make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.host_crashdump_size) @@ -3203,7 +3205,7 @@ let host_record rpc session_id host = ) () ; make_field ~name:"last-software-update" - ~get:(fun () -> Date.to_string (x ()).API.host_last_software_update) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.host_last_software_update) () ; make_field ~name:"latest-synced-updates-applied" ~get:(fun () -> @@ -3274,7 +3276,7 @@ let vdi_record rpc session_id vdi = ~get:(fun () -> get_uuids_from_refs (x ()).API.vDI_snapshots) () ; make_field ~name:"snapshot-time" - ~get:(fun () -> Date.to_string (x ()).API.vDI_snapshot_time) + ~get:(fun () -> Date.to_rfc3339 (x ()).API.vDI_snapshot_time) () ; make_field ~name:"allowed-operations" ~get:(fun () -> @@ -5118,7 +5120,7 @@ let cluster_host_record rpc session_id cluster_host = () ; make_field ~name:"last-update-live" ~get:(fun () -> - (x ()).API.cluster_host_last_update_live |> Date.to_string + (x ()).API.cluster_host_last_update_live |> Date.to_rfc3339 ) () ; make_field ~name:"allowed-operations" @@ -5184,10 +5186,10 @@ let certificate_record rpc session_id certificate = ~get:(fun () -> (x ()).API.certificate_host |> get_uuid_from_ref) () ; make_field ~name:"not-before" - ~get:(fun () -> (x ()).API.certificate_not_before |> Date.to_string) + ~get:(fun () -> (x ()).API.certificate_not_before |> Date.to_rfc3339) () ; make_field ~name:"not-after" - ~get:(fun () -> (x ()).API.certificate_not_after |> Date.to_string) + ~get:(fun () -> (x ()).API.certificate_not_after |> Date.to_rfc3339) () ; make_field ~name:"fingerprint" ~get:(fun () -> (x ()).API.certificate_fingerprint) diff --git 
a/ocaml/xapi-idl/storage/storage_interface.ml b/ocaml/xapi-idl/storage/storage_interface.ml index 4b3e03e48e9..f5bd93de60b 100644 --- a/ocaml/xapi-idl/storage/storage_interface.ml +++ b/ocaml/xapi-idl/storage/storage_interface.ml @@ -206,8 +206,7 @@ type vdi_info = { ; (* sm_config: workaround via XenAPI *) metadata_of_pool: string [@default ""] ; is_a_snapshot: bool [@default false] - ; snapshot_time: string - [@default Xapi_stdext_date.Date.to_string Xapi_stdext_date.Date.never] + ; snapshot_time: string [@default Xapi_stdext_date.Date.(to_rfc3339 epoch)] ; snapshot_of: Vdi.t [@default Vdi.of_string ""] ; (* managed: workaround via XenAPI *) read_only: bool [@default false] diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 74ea3bb8d9f..cd6575bc9b3 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1330,9 +1330,7 @@ let bind ~volume_script_dir = Volume_client.snapshot (volume_rpc ~dbg) dbg sr vdi ) >>>= fun response -> - let now = - Xapi_stdext_date.Date.(to_string (of_float (Unix.gettimeofday ()))) - in + let now = Xapi_stdext_date.Date.(to_rfc3339 (now ())) in set ~dbg ~sr ~vdi:response.Xapi_storage.Control.key ~key:_snapshot_time_key ~value:now >>>= fun () -> diff --git a/ocaml/xapi/certificates.ml b/ocaml/xapi/certificates.ml index fe66194cb0e..6e1c01b7be6 100644 --- a/ocaml/xapi/certificates.ml +++ b/ocaml/xapi/certificates.ml @@ -214,7 +214,7 @@ end = struct in (name, Ref.null, `ca, remove_obsoleted_copies) in - let date_of_ptime time = Date.of_float (Ptime.to_float_s time) in + let date_of_ptime time = Date.of_unix_time (Ptime.to_float_s time) in let dates_of_ptimes (a, b) = (date_of_ptime a, date_of_ptime b) in let not_before, not_after = dates_of_ptimes (X509.Certificate.validity certificate) diff --git a/ocaml/xapi/create_misc.ml b/ocaml/xapi/create_misc.ml index beb94f4751c..d329170fce6 100644 --- a/ocaml/xapi/create_misc.ml +++ b/ocaml/xapi/create_misc.ml @@ -280,7 +280,7 @@ 
and create_domain_zero_record ~__context ~domain_zero_ref (host_info : host_info ~scheduled_to_be_resident_on:Ref.null ~affinity:localhost ~suspend_VDI:Ref.null ~domid:0L ~domarch ~is_control_domain:true ~is_a_template:false ~is_default_template:false ~is_a_snapshot:false - ~snapshot_time:Date.never ~snapshot_of:Ref.null + ~snapshot_time:Date.epoch ~snapshot_of:Ref.null ~transportable_snapshot_id:"" ~snapshot_info:[] ~snapshot_metadata:"" ~parent:Ref.null ~other_config:[] ~blobs:[] ~xenstore_data:[] ~tags:[] ~user_version:1L ~ha_restart_priority:"" ~ha_always_run:false @@ -348,8 +348,8 @@ and create_domain_zero_metrics_record ~__context ~domain_zero_metrics_ref ~memory_actual:memory_constraints.target ~vCPUs_utilisation:(List.map (fun x -> (Int64.of_int x, 0.)) (mkints vcpus)) ~vCPUs_number:(Int64.of_int vcpus) ~vCPUs_CPU:[] ~vCPUs_params:[] - ~vCPUs_flags:[] ~state:[] ~start_time:Date.never ~install_time:Date.never - ~last_updated:Date.never ~other_config:[] ~hvm:false ~nomigrate:false + ~vCPUs_flags:[] ~state:[] ~start_time:Date.epoch ~install_time:Date.epoch + ~last_updated:Date.epoch ~other_config:[] ~hvm:false ~nomigrate:false ~nested_virt:false ~current_domain_type:Xapi_globs.domain_zero_domain_type and update_domain_zero_record ~__context ~domain_zero_ref (host_info : host_info) diff --git a/ocaml/xapi/db_gc_util.ml b/ocaml/xapi/db_gc_util.ml index 182eaac00df..7972aa28ed9 100644 --- a/ocaml/xapi/db_gc_util.ml +++ b/ocaml/xapi/db_gc_util.ml @@ -308,14 +308,14 @@ let timeout_tasks ~__context = let completed_old, completed_young = List.partition (fun (_, t) -> - Date.to_float t.Db_actions.task_finished < oldest_completed_time + Date.to_unix_time t.Db_actions.task_finished < oldest_completed_time ) completed_gcable in let pending_old, pending_young = List.partition (fun (_, t) -> - Date.to_float t.Db_actions.task_created < oldest_pending_time + Date.to_unix_time t.Db_actions.task_created < oldest_pending_time ) pending in @@ -360,8 +360,8 @@ let timeout_tasks 
~__context = List.sort (fun (_, t1) (_, t2) -> compare - (Date.to_float t1.Db_actions.task_finished) - (Date.to_float t2.Db_actions.task_finished) + (Date.to_unix_time t1.Db_actions.task_finished) + (Date.to_unix_time t2.Db_actions.task_finished) ) completed in @@ -422,7 +422,7 @@ let timeout_sessions_common ~__context sessions limit session_group = List.map (fun (x, y) -> ( x - , Date.to_float y.Db_actions.session_last_active + , Date.to_unix_time y.Db_actions.session_last_active , y.Db_actions.session_uuid ) ) @@ -447,7 +447,7 @@ let timeout_sessions_common ~__context sessions limit session_group = debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" (Ref.string_of s) uuid (Context.trackid_of_session (Some s)) - (Date.to_string (Date.of_float active)) + (Date.to_rfc3339 (Date.of_unix_time active)) doc ; Xapi_session.destroy_db_session ~__context ~self:s ) @@ -586,7 +586,7 @@ let timeout_alerts ~__context = let all_alerts = Db.Alert.get_all ~__context in let now = Unix.gettimeofday() in List.iter (fun alert -> - let alert_time = Date.to_float (Db.Alert.get_timestamp ~__context ~self:alert) in + let alert_time = Date.to_unix_time (Db.Alert.get_timestamp ~__context ~self:alert) in if now -. 
alert_time > Xapi_globs.alert_timeout then Db.Alert.destroy ~__context ~self:alert ) all_alerts diff --git a/ocaml/xapi/dbsync.ml b/ocaml/xapi/dbsync.ml index 5ee30bb39c8..875b406bb89 100644 --- a/ocaml/xapi/dbsync.ml +++ b/ocaml/xapi/dbsync.ml @@ -42,7 +42,7 @@ let create_host_metrics ~__context = Db.Host_metrics.create ~__context ~ref:r ~uuid:(Uuidx.to_string (Uuidx.make ())) ~live:false ~memory_total:0L ~memory_free:0L - ~last_updated:Xapi_stdext_date.Date.never ~other_config:[] ; + ~last_updated:Xapi_stdext_date.Date.epoch ~other_config:[] ; Db.Host.set_metrics ~__context ~self ~value:r ) ) diff --git a/ocaml/xapi/dbsync_master.ml b/ocaml/xapi/dbsync_master.ml index 8f8e6a582f8..31f235e7214 100644 --- a/ocaml/xapi/dbsync_master.ml +++ b/ocaml/xapi/dbsync_master.ml @@ -291,11 +291,10 @@ let ensure_vm_metrics_records_exist __context = let uuid = Uuidx.to_string (Uuidx.make ()) in Db.VM_metrics.create ~__context ~ref:m ~uuid ~vCPUs_number:0L ~vCPUs_utilisation:[] ~memory_actual:0L ~vCPUs_CPU:[] ~vCPUs_params:[] - ~vCPUs_flags:[] ~start_time:Xapi_stdext_date.Date.never - ~install_time:Xapi_stdext_date.Date.never ~state:[] - ~last_updated:(Xapi_stdext_date.Date.of_float 0.) - ~other_config:[] ~hvm:false ~nested_virt:false ~nomigrate:false - ~current_domain_type:`unspecified ; + ~vCPUs_flags:[] ~start_time:Xapi_stdext_date.Date.epoch + ~install_time:Xapi_stdext_date.Date.epoch ~state:[] + ~last_updated:Xapi_stdext_date.Date.epoch ~other_config:[] ~hvm:false + ~nested_virt:false ~nomigrate:false ~current_domain_type:`unspecified ; Db.VM.set_metrics ~__context ~self:vm ~value:m ) ) diff --git a/ocaml/xapi/dbsync_slave.ml b/ocaml/xapi/dbsync_slave.ml index 32ee7d44d21..3b90a3a05c3 100644 --- a/ocaml/xapi/dbsync_slave.ml +++ b/ocaml/xapi/dbsync_slave.ml @@ -73,12 +73,12 @@ let get_start_time () = let uptime = String.split ' ' uptime in let uptime = List.hd uptime in let uptime = float_of_string uptime in - let boot_time = Date.of_float (now -. 
uptime) in - debug " system booted at %s" (Date.to_string boot_time) ; + let boot_time = Date.of_unix_time (now -. uptime) in + debug " system booted at %s" (Date.to_rfc3339 boot_time) ; boot_time with e -> debug "Calculating boot time failed with '%s'" (ExnHelper.string_of_exn e) ; - Date.never + Date.epoch (* not sufficient just to fill in this data on create time [Xen caps may change if VT enabled in BIOS etc.] *) @@ -106,7 +106,9 @@ let refresh_localhost_info ~__context info = Db.Host.set_capabilities ~__context ~self:host ~value:caps ; Db.Host.set_address ~__context ~self:host ~value:(get_my_ip_addr ~__context) ; let boot_time_key = "boot_time" in - let boot_time_value = string_of_float (Date.to_float (get_start_time ())) in + let boot_time_value = + string_of_float (Date.to_unix_time (get_start_time ())) + in Db.Host.remove_from_other_config ~__context ~self:host ~key:boot_time_key ; Db.Host.add_to_other_config ~__context ~self:host ~key:boot_time_key ~value:boot_time_value ; diff --git a/ocaml/xapi/debug_populate.ml b/ocaml/xapi/debug_populate.ml index 0eb1a89581a..3c192509915 100644 --- a/ocaml/xapi/debug_populate.ml +++ b/ocaml/xapi/debug_populate.ml @@ -72,7 +72,7 @@ let rec make_vdis_and_vbds __context vmref i = let physical_utilisation = 1L in let metadata_of_pool = Ref.null in let is_a_snapshot = false in - let snapshot_time = Xapi_stdext_date.Date.never in + let snapshot_time = Xapi_stdext_date.Date.epoch in let snapshot_of = Ref.null in let sharable = false in let cbt_enabled = false in diff --git a/ocaml/xapi/export.ml b/ocaml/xapi/export.ml index 24589827bc8..a81ec647225 100644 --- a/ocaml/xapi/export.ml +++ b/ocaml/xapi/export.ml @@ -241,7 +241,7 @@ let make_vm ?(with_snapshot_metadata = false) ~preserve_power_state table ; API.vM_snapshots= (if with_snapshot_metadata then vm.API.vM_snapshots else []) ; API.vM_snapshot_time= - (if with_snapshot_metadata then vm.API.vM_snapshot_time else Date.never) + (if with_snapshot_metadata then 
vm.API.vM_snapshot_time else Date.epoch) ; API.vM_transportable_snapshot_id= ( if with_snapshot_metadata then vm.API.vM_transportable_snapshot_id diff --git a/ocaml/xapi/license_check.ml b/ocaml/xapi/license_check.ml index d34f2bd4526..e6df516f353 100644 --- a/ocaml/xapi/license_check.ml +++ b/ocaml/xapi/license_check.ml @@ -20,7 +20,7 @@ let never, _ = let get_expiry_date ~__context ~host = let license = Db.Host.get_license_params ~__context ~self:host in if List.mem_assoc "expiry" license then - Some (Xapi_stdext_date.Date.of_string (List.assoc "expiry" license)) + Some (Xapi_stdext_date.Date.of_iso8601 (List.assoc "expiry" license)) else None @@ -30,7 +30,7 @@ let check_expiry ~__context ~host = | None -> false (* No expiry date means no expiry :) *) | Some date -> - Unix.time () > Xapi_stdext_date.Date.to_float date + Unix.time () > Xapi_stdext_date.Date.to_unix_time date in if expired then raise (Api_errors.Server_error (Api_errors.license_expired, [])) diff --git a/ocaml/xapi/license_check.mli b/ocaml/xapi/license_check.mli index f970ff878df..610faaf9e0b 100644 --- a/ocaml/xapi/license_check.mli +++ b/ocaml/xapi/license_check.mli @@ -20,9 +20,7 @@ val never : float (** The expiry date that is considered to be "never". *) val get_expiry_date : - __context:Context.t - -> host:API.ref_host - -> Xapi_stdext_date.Date.iso8601 option + __context:Context.t -> host:API.ref_host -> Xapi_stdext_date.Date.t option (** Returns (Some date) if the host's license has an expiry date, * otherwise returns None. 
*) diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index 2ba1139de32..f0ba0a38a43 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -1131,7 +1131,7 @@ functor let set_telemetry_next_collection ~__context ~self ~value = info "%s: pool='%s' value='%s'" __FUNCTION__ (pool_uuid ~__context self) - (Xapi_stdext_date.Date.to_string value) ; + (Xapi_stdext_date.Date.to_rfc3339 value) ; Local.Pool.set_telemetry_next_collection ~__context ~self ~value let reset_telemetry_uuid ~__context ~self = diff --git a/ocaml/xapi/monitor_master.ml b/ocaml/xapi/monitor_master.ml index bb4e6cf2e5b..ffad86ccd6c 100644 --- a/ocaml/xapi/monitor_master.ml +++ b/ocaml/xapi/monitor_master.ml @@ -70,8 +70,7 @@ let set_pif_metrics ~__context ~self ~vendor ~device ~carrier ~speed ~duplex Db.PIF_metrics.set_duplex ~__context ~self ~value:duplex ; if pmr.API.pIF_metrics_pci_bus_path <> pcibuspath then Db.PIF_metrics.set_pci_bus_path ~__context ~self ~value:pcibuspath ; - Db.PIF_metrics.set_last_updated ~__context ~self - ~value:(Date.of_float (Unix.gettimeofday ())) + Db.PIF_metrics.set_last_updated ~__context ~self ~value:(Date.now ()) (* Note that the following function is actually called on the slave most of the * time now but only when the PIF information changes. *) @@ -190,8 +189,8 @@ let update_pifs ~__context host pifs = ~uuid:(Uuidx.to_string (Uuidx.make ())) ~carrier:false ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" ~speed:0L ~duplex:false ~pci_bus_path:"" - ~io_read_kbs:0. ~io_write_kbs:0. - ~last_updated:(Date.of_float 0.) ~other_config:[] ; + ~io_read_kbs:0. ~io_write_kbs:0. 
~last_updated:Date.epoch + ~other_config:[] ; Db.PIF.set_metrics ~__context ~self:pifdev ~value:ref ; ref in diff --git a/ocaml/xapi/pool_periodic_update_sync.ml b/ocaml/xapi/pool_periodic_update_sync.ml index 5f1e502b2b4..45aacf82a9c 100644 --- a/ocaml/xapi/pool_periodic_update_sync.ml +++ b/ocaml/xapi/pool_periodic_update_sync.ml @@ -144,7 +144,7 @@ let rec update_sync () = with e -> let exc = Printexc.to_string e in warn "Periodic update sync failed with exception %s" exc ; - let now = Xapi_stdext_date.Date.(now () |> to_string) in + let now = Xapi_stdext_date.Date.(now () |> to_rfc3339) in let body = Printf.sprintf "Periodic update sync \ diff --git a/ocaml/xapi/storage_smapiv1.ml b/ocaml/xapi/storage_smapiv1.ml index 465b5d354b1..d8bf2cdc203 100644 --- a/ocaml/xapi/storage_smapiv1.ml +++ b/ocaml/xapi/storage_smapiv1.ml @@ -86,7 +86,7 @@ let vdi_info_of_vdi_rec __context vdi_rec = ; ty= Storage_utils.string_of_vdi_type vdi_rec.API.vDI_type ; metadata_of_pool= Ref.string_of vdi_rec.API.vDI_metadata_of_pool ; is_a_snapshot= vdi_rec.API.vDI_is_a_snapshot - ; snapshot_time= Date.to_string vdi_rec.API.vDI_snapshot_time + ; snapshot_time= Date.to_rfc3339 vdi_rec.API.vDI_snapshot_time ; snapshot_of= ( if Db.is_valid_ref __context vdi_rec.API.vDI_snapshot_of then Db.VDI.get_uuid ~__context ~self:vdi_rec.API.vDI_snapshot_of @@ -146,7 +146,7 @@ module SMAPIv1 : Server_impl = struct Server_helpers.exec_with_new_task "VDI.set_snapshot_time" ~subtask_of:(Ref.of_string dbg) (fun __context -> let vdi, _ = find_vdi ~__context sr vdi in - let snapshot_time = Date.of_string snapshot_time in + let snapshot_time = Date.of_iso8601 snapshot_time in Db.VDI.set_snapshot_time ~__context ~self:vdi ~value:snapshot_time ) @@ -761,7 +761,7 @@ module SMAPIv1 : Server_impl = struct (Db.VDI.get_other_config ~__context ~self:clonee) with _ -> Uuidx.(to_string (make ())) in - let snapshot_time = Date.of_float (Unix.gettimeofday ()) in + let snapshot_time = Date.now () in Db.VDI.set_name_label 
~__context ~self ~value:vdi_info.name_label ; Db.VDI.set_name_description ~__context ~self ~value:vdi_info.name_description ; diff --git a/ocaml/xapi/storage_smapiv1_wrapper.ml b/ocaml/xapi/storage_smapiv1_wrapper.ml index 2efa9194168..7c5a6a97f43 100644 --- a/ocaml/xapi/storage_smapiv1_wrapper.ml +++ b/ocaml/xapi/storage_smapiv1_wrapper.ml @@ -90,7 +90,7 @@ let host_state_path = ref "/var/run/nonpersistent/xapi/storage.db" let indent x = " " ^ x -let string_of_date x = Date.to_string (Date.of_float x) +let string_of_date x = Date.to_rfc3339 (Date.of_unix_time x) let with_dbg ~name ~dbg f = Debug_info.with_dbg ~with_thread:true ~module_name:"SMAPIv1-Wrapper" ~name diff --git a/ocaml/xapi/taskHelper.ml b/ocaml/xapi/taskHelper.ml index 1f1d4fd7744..27e30ce3d39 100644 --- a/ocaml/xapi/taskHelper.ml +++ b/ocaml/xapi/taskHelper.ml @@ -44,7 +44,7 @@ let make ~__context ~http_other_config ?(description = "") ?session_id in let (_ : unit) = Db_actions.DB_Action.Task.create ~ref ~__context ~created:(Date.now ()) - ~finished:(Date.of_float 0.0) ~current_operations:[] ~_type:"" + ~finished:Date.epoch ~current_operations:[] ~_type:"" ~session:(Option.value ~default:Ref.null session_id) ~resident_on:!Xapi_globs.localhost_ref ~status:`pending ~result:"" ~progress:0. 
~error_info:[] ~allowed_operations:[] diff --git a/ocaml/xapi/updateinfo.ml b/ocaml/xapi/updateinfo.ml index 375604cfe78..88e4a0cf0fc 100644 --- a/ocaml/xapi/updateinfo.ml +++ b/ocaml/xapi/updateinfo.ml @@ -562,7 +562,7 @@ module UpdateInfo = struct ; ("special-info", `String ui.spec_info) ; ("URL", `String ui.url) ; ("type", `String ui.update_type) - ; ("issued", `String (Xapi_stdext_date.Date.to_string ui.issued)) + ; ("issued", `String (Xapi_stdext_date.Date.to_rfc3339 ui.issued)) ; ("severity", `String (Severity.to_string ui.severity)) ; ( "livepatches" , `List (List.map (fun x -> LivePatch.to_json x) ui.livepatches) @@ -656,7 +656,7 @@ module UpdateInfo = struct match List.assoc_opt "date" attrs with | Some date -> ( try - Xapi_stdext_date.Date.of_string + Xapi_stdext_date.Date.of_iso8601 (Scanf.sscanf date "%04d-%02d-%02d %02d:%02d:%02d" (fun y mon d h m s -> diff --git a/ocaml/xapi/vpx.ml b/ocaml/xapi/vpx.ml index a1cd5924987..2871ad6486f 100644 --- a/ocaml/xapi/vpx.ml +++ b/ocaml/xapi/vpx.ml @@ -27,7 +27,7 @@ type jobInfo = { ; importInfo: importInfo } -type dateTime = Xapi_stdext_date.Date.iso8601 +type dateTime = Xapi_stdext_date.Date.t type jobInstance = { id: string @@ -221,12 +221,12 @@ let serverInfo_of_rpc r = } } -let rpc_of_dateTime v = Rpc.DateTime (Xapi_stdext_date.Date.to_string v) +let rpc_of_dateTime v = Rpc.DateTime (Xapi_stdext_date.Date.to_rfc3339 v) let dateTime_of_rpc r = match r with | Rpc.DateTime v -> - Xapi_stdext_date.Date.of_string v + Xapi_stdext_date.Date.of_iso8601 v | x -> rpc_type_error x "DateTime" "DateTime(datetime)" diff --git a/ocaml/xapi/xapi_blob.ml b/ocaml/xapi/xapi_blob.ml index 6be9cdd9abd..f483f8d7835 100644 --- a/ocaml/xapi/xapi_blob.ml +++ b/ocaml/xapi/xapi_blob.ml @@ -24,7 +24,7 @@ let create ~__context ~mime_type ~public = if mime_type = "" then "application/octet-stream" else mime_type in Db.Blob.create ~__context ~ref ~uuid:(Uuidx.to_string uuid) ~public - ~mime_type:mime_type' ~size:0L 
~last_updated:Xapi_stdext_date.Date.never + ~mime_type:mime_type' ~size:0L ~last_updated:Xapi_stdext_date.Date.epoch ~name_label:"" ~name_description:"" ; ref @@ -212,7 +212,7 @@ let handler (req : Http.Request.t) s _ = in Db.Blob.set_size ~__context ~self ~value:size ; Db.Blob.set_last_updated ~__context ~self - ~value:(Xapi_stdext_date.Date.of_float (Unix.gettimeofday ())) + ~value:(Xapi_stdext_date.Date.of_unix_time (Unix.gettimeofday ())) | _ -> failwith "Unsupported method for BLOB" in diff --git a/ocaml/xapi/xapi_guest_agent.ml b/ocaml/xapi/xapi_guest_agent.ml index ffde9e7bf77..bd13e808ec8 100644 --- a/ocaml/xapi/xapi_guest_agent.ml +++ b/ocaml/xapi/xapi_guest_agent.ml @@ -329,7 +329,7 @@ let create_and_set_guest_metrics (lookup : string -> string option) ~pV_drivers_version:initial_gm.pv_drivers_version ~pV_drivers_up_to_date:pV_drivers_detected ~memory:[] ~disks:[] ~networks:initial_gm.networks ~pV_drivers_detected ~other:initial_gm.other - ~last_updated:(Date.of_float initial_gm.last_updated) + ~last_updated:(Date.of_unix_time initial_gm.last_updated) ~other_config:[] ~live:true ~can_use_hotplug_vbd:initial_gm.can_use_hotplug_vbd ~can_use_hotplug_vif:initial_gm.can_use_hotplug_vif ; @@ -469,7 +469,7 @@ let all (lookup : string -> string option) (list : string -> string list) (* if(guest_metrics_cached.memory <> memory) then Db.VM_guest_metrics.set_memory ~__context ~self:gm ~value:memory; *) Db.VM_guest_metrics.set_last_updated ~__context ~self:gm - ~value:(Date.of_float last_updated) ; + ~value:(Date.of_unix_time last_updated) ; if guest_metrics_cached.device_id <> device_id then if List.mem_assoc Xapi_globs.device_id_key_name device_id then ( let value = List.assoc Xapi_globs.device_id_key_name device_id in diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml index 578788f8c9c..ddfbc357fb2 100644 --- a/ocaml/xapi/xapi_ha.ml +++ b/ocaml/xapi/xapi_ha.ml @@ -1463,7 +1463,7 @@ let rec propose_new_master_internal ~__context ~address ~manual = 
(Printf.sprintf "Already agreed to commit host address '%s' at %s ('%f' secs ago)" x - (Date.to_string (Date.of_float !proposed_master_time)) + (Date.to_rfc3339 (Date.of_unix_time !proposed_master_time)) diff ) | None -> diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index 897e8572e13..aa2f07e2fba 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -43,7 +43,7 @@ let take n xs = in loop n [] xs -let get_servertime ~__context ~host:_ = Date.of_float (Unix.gettimeofday ()) +let get_servertime ~__context ~host:_ = Date.now () let get_server_localtime ~__context ~host:_ = Date.localtime () @@ -1009,7 +1009,7 @@ let create ~__context ~uuid ~name_label ~name_description:_ ~hostname ~address let make_new_metrics_object ref = Db.Host_metrics.create ~__context ~ref ~uuid:(Uuidx.to_string (Uuidx.make ())) - ~live:false ~memory_total:0L ~memory_free:0L ~last_updated:Date.never + ~live:false ~memory_total:0L ~memory_free:0L ~last_updated:Date.epoch ~other_config:[] in let name_description = "Default install" and host = Ref.make () in @@ -1058,8 +1058,7 @@ let create ~__context ~uuid ~name_label ~name_description:_ ~hostname ~address ~latest_synced_updates_applied:`unknown ~pending_guidances_recommended:[] ~pending_guidances_full:[] ~last_update_hash:"" ; (* If the host we're creating is us, make sure its set to live *) - Db.Host_metrics.set_last_updated ~__context ~self:metrics - ~value:(Date.of_float (Unix.gettimeofday ())) ; + Db.Host_metrics.set_last_updated ~__context ~self:metrics ~value:(Date.now ()) ; Db.Host_metrics.set_live ~__context ~self:metrics ~value:host_is_us ; host diff --git a/ocaml/xapi/xapi_host.mli b/ocaml/xapi/xapi_host.mli index 0b61af641a2..8813f037b19 100644 --- a/ocaml/xapi/xapi_host.mli +++ b/ocaml/xapi/xapi_host.mli @@ -272,10 +272,9 @@ val sync_data : __context:Context.t -> host:API.ref_host -> unit val backup_rrds : __context:Context.t -> host:'b -> delay:float -> unit -val get_servertime : __context:'a -> host:'b 
-> Xapi_stdext_date.Date.iso8601 +val get_servertime : __context:'a -> host:'b -> Xapi_stdext_date.Date.t -val get_server_localtime : - __context:'a -> host:'b -> Xapi_stdext_date.Date.iso8601 +val get_server_localtime : __context:'a -> host:'b -> Xapi_stdext_date.Date.t val enable_binary_storage : __context:Context.t -> host:[`host] Ref.t -> unit diff --git a/ocaml/xapi/xapi_host_crashdump.ml b/ocaml/xapi/xapi_host_crashdump.ml index f16255e2c97..e8095ba6259 100644 --- a/ocaml/xapi/xapi_host_crashdump.ml +++ b/ocaml/xapi/xapi_host_crashdump.ml @@ -134,7 +134,7 @@ let resynchronise ~__context ~host = with _ -> (Unix.stat (Filename.concat crash_dir filename)).Unix.st_ctime in - let timestamp = Date.of_float timestamp in + let timestamp = Date.of_unix_time timestamp in let r = Ref.make () and uuid = Uuidx.to_string (Uuidx.make ()) in Db.Host_crashdump.create ~__context ~ref:r ~uuid ~other_config:[] ~host ~timestamp ~size ~filename diff --git a/ocaml/xapi/xapi_local_session.ml b/ocaml/xapi/xapi_local_session.ml index 148c776ca2f..7a5cf5f5070 100644 --- a/ocaml/xapi/xapi_local_session.ml +++ b/ocaml/xapi/xapi_local_session.ml @@ -14,11 +14,7 @@ (** Code to handle local sessions, used so that slaves can communicate even when the master is down. 
*) -type t = { - r: API.ref_session - ; pool: bool - ; last_active: Xapi_stdext_date.Date.iso8601 -} +type t = {r: API.ref_session; pool: bool; last_active: Xapi_stdext_date.Date.t} let with_lock = Xapi_stdext_threads.Threadext.Mutex.execute @@ -31,13 +27,7 @@ let get_all ~__context = let create ~__context ~pool = let r = Ref.make () in - let session = - { - r - ; pool - ; last_active= Xapi_stdext_date.Date.of_float (Unix.gettimeofday ()) - } - in + let session = {r; pool; last_active= Xapi_stdext_date.Date.now ()} in with_lock m (fun () -> Hashtbl.replace table r session) ; r diff --git a/ocaml/xapi/xapi_local_session.mli b/ocaml/xapi/xapi_local_session.mli index dbba9a57a8b..c7859879990 100644 --- a/ocaml/xapi/xapi_local_session.mli +++ b/ocaml/xapi/xapi_local_session.mli @@ -13,11 +13,7 @@ *) (** Represents local sessions, for use in emergency mode *) -type t = { - r: API.ref_session - ; pool: bool - ; last_active: Xapi_stdext_date.Date.iso8601 -} +type t = {r: API.ref_session; pool: bool; last_active: Xapi_stdext_date.Date.t} val get_all : __context:Context.t -> API.ref_session list diff --git a/ocaml/xapi/xapi_message.ml b/ocaml/xapi/xapi_message.ml index 8d81bef3777..8bc43cc48e8 100644 --- a/ocaml/xapi/xapi_message.ml +++ b/ocaml/xapi/xapi_message.ml @@ -73,7 +73,7 @@ let to_xml output _ref gen message = ; tag "priority" [data (Int64.to_string message.API.message_priority)] ; tag "cls" [data (Record_util.cls_to_string message.API.message_cls)] ; tag "obj_uuid" [data message.API.message_obj_uuid] - ; tag "timestamp" [data (Date.to_string message.API.message_timestamp)] + ; tag "timestamp" [data (Date.to_rfc3339 message.API.message_timestamp)] ; tag "uuid" [data message.API.message_uuid] ; tag "body" [data message.API.message_body] ] @@ -96,7 +96,7 @@ let of_xml input = ; API.message_priority= 0L ; API.message_cls= `VM ; API.message_obj_uuid= "" - ; API.message_timestamp= Date.never + ; API.message_timestamp= Date.epoch ; API.message_body= "" ; 
API.message_uuid= "" } @@ -123,7 +123,8 @@ let of_xml input = | "obj_uuid" -> message := {!message with API.message_obj_uuid= dat} | "timestamp" -> - message := {!message with API.message_timestamp= Date.of_string dat} + message := + {!message with API.message_timestamp= Date.of_iso8601 dat} | "uuid" -> message := {!message with API.message_uuid= dat} | "body" -> @@ -342,7 +343,7 @@ let write ~__context ~_ref ~message = ) ) ; Unixext.mkdir_rec message_dir 0o700 ; - let timestamp = ref (Date.to_float message.API.message_timestamp) in + let timestamp = ref (Date.to_unix_time message.API.message_timestamp) in if message_exists () then Some (message_gen ()) else @@ -442,7 +443,7 @@ let create ~__context ~name ~priority ~cls ~obj_uuid ~body = ; API.message_priority= priority ; API.message_cls= cls ; API.message_obj_uuid= obj_uuid - ; API.message_timestamp= Date.of_float timestamp + ; API.message_timestamp= Date.of_unix_time timestamp ; API.message_body= body } in @@ -596,8 +597,8 @@ let get_real_inner dir filter name_filter = r else compare - (Date.to_float m2.API.message_timestamp) - (Date.to_float m1.API.message_timestamp) + (Date.to_unix_time m2.API.message_timestamp) + (Date.to_unix_time m1.API.message_timestamp) ) messages with _ -> [] @@ -631,16 +632,16 @@ let get ~__context ~cls ~obj_uuid ~since = (* Read in all the messages for a particular object *) let class_symlink = class_symlink cls obj_uuid in if not (check_uuid ~__context ~cls ~uuid:obj_uuid) then - raise (Api_errors.Server_error (Api_errors.uuid_invalid, [])) ; + raise Api_errors.(Server_error (uuid_invalid, [])) ; let msg = get_real_inner class_symlink - (fun msg -> Date.to_float msg.API.message_timestamp > Date.to_float since) + (fun msg -> Date.is_later msg.API.message_timestamp ~than:since) (fun _ -> true) in List.map (fun (_, b, c) -> (b, c)) msg let get_since ~__context ~since = - get_real message_dir (fun _ -> true) (Date.to_float since) + get_real message_dir (fun _ -> true) (Date.to_unix_time 
since) let get_since_for_events ~__context since = let cached_result = @@ -747,7 +748,7 @@ let repopulate_cache () = let last_256 = Listext.List.take 256 messages in in_memory_cache := last_256 ; let get_ts (ts, _, m) = - Printf.sprintf "%Ld (%s)" ts (Date.to_string m.API.message_timestamp) + Printf.sprintf "%Ld (%s)" ts (Date.to_rfc3339 m.API.message_timestamp) in debug "Constructing in-memory-cache: most length=%d" (List.length last_256) ; ( try diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index b6067a509de..56dff779240 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -412,7 +412,7 @@ let make_pif_metrics ~__context = Db.PIF_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid ~carrier:false ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" ~speed:0L ~duplex:false ~pci_bus_path:"" ~io_read_kbs:0. ~io_write_kbs:0. - ~last_updated:(Date.of_float 0.) ~other_config:[] + ~last_updated:Date.epoch ~other_config:[] in metrics diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 4fdefa0f8fb..49ea7194dc9 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -3127,10 +3127,10 @@ let get_license_state ~__context ~self:_ = | None -> "never" | Some date -> - if date = Date.of_float License_check.never then + if date = Date.of_unix_time License_check.never then "never" else - Date.to_string date + Date.to_rfc3339 date in [("edition", pool_edition); ("expiry", pool_expiry)] @@ -3285,7 +3285,7 @@ let alert_failed_login_attempts () = let now = Date.localtime () in let login_failures_between = Printf.sprintf "login failures between '%s' and last check" - (Date.to_string now) + (Date.to_rfc3339 now) in match Xapi_session.get_failed_login_stats () with | None -> @@ -3678,7 +3678,7 @@ let set_telemetry_next_collection ~__context ~self ~value = let err_msg = "Can't parse date and time for telemetry collection." 
in raise Api_errors.(Server_error (internal_error, [err_msg])) in - let ts = Date.to_string value in + let ts = Date.to_rfc3339 value in match Ptime.is_later dt_of_value ~than:dt_of_max_sched with | true -> raise Api_errors.(Server_error (telemetry_next_collection_too_late, [ts])) diff --git a/ocaml/xapi/xapi_pool.mli b/ocaml/xapi/xapi_pool.mli index 5fc33c66cad..9e74ea3f373 100644 --- a/ocaml/xapi/xapi_pool.mli +++ b/ocaml/xapi/xapi_pool.mli @@ -397,7 +397,7 @@ val set_https_only : val set_telemetry_next_collection : __context:Context.t -> self:API.ref_pool - -> value:Xapi_stdext_date.Date.iso8601 + -> value:Xapi_stdext_date.Date.t -> unit val reset_telemetry_uuid : __context:Context.t -> self:API.ref_pool -> unit diff --git a/ocaml/xapi/xapi_pool_license.ml b/ocaml/xapi/xapi_pool_license.ml index 5e69d64dee1..a37805127db 100644 --- a/ocaml/xapi/xapi_pool_license.ml +++ b/ocaml/xapi/xapi_pool_license.ml @@ -17,8 +17,8 @@ module D = Debug.Make (struct let name = "xapi_pool_license" end) open D (* Compare two date options, where None is always greater than (Some _) *) -let compare_dates (a : Xapi_stdext_date.Date.iso8601 option) - (b : Xapi_stdext_date.Date.iso8601 option) = +let compare_dates (a : Xapi_stdext_date.Date.t option) + (b : Xapi_stdext_date.Date.t option) = match (a, b) with | None, None -> 0 diff --git a/ocaml/xapi/xapi_pool_patch.ml b/ocaml/xapi/xapi_pool_patch.ml index 5988a1abc7c..72033070bc5 100644 --- a/ocaml/xapi/xapi_pool_patch.ml +++ b/ocaml/xapi/xapi_pool_patch.ml @@ -140,7 +140,7 @@ let get_patch_applied_to ~__context ~patch ~host = let write_patch_applied_db ~__context ?date ?(applied = true) ~self ~host () = let date = - Xapi_stdext_date.Date.of_float + Xapi_stdext_date.Date.of_unix_time (match date with Some d -> d | None -> Unix.gettimeofday ()) in match get_patch_applied_to ~__context ~patch:self ~host with diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 43564278dda..72a0ff7c705 100644 --- 
a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -41,7 +41,7 @@ module AuthFail : sig val on_fail : __context:Context.t - -> now:Date.iso8601 + -> now:Date.t -> uname:string option -> originator:string option -> record:[< `log_only | `log_and_alert] @@ -85,7 +85,7 @@ end = struct type client_failed_attempts = { client: client ; num_failed_attempts: int - ; last_failed_attempt: Date.iso8601 + ; last_failed_attempt: Date.t } let up_to_3 xs x = @@ -103,7 +103,7 @@ end = struct |} (string_of_client x.client) x.num_failed_attempts - (Date.to_string x.last_failed_attempt) + (Date.to_rfc3339 x.last_failed_attempt) type stats = { total_num_failed_attempts: int @@ -143,7 +143,7 @@ end = struct val get : unit -> stats option (* returns the number of failures from this client since last call to [ get ] *) - val record_client : client -> now:Date.iso8601 -> int + val record_client : client -> now:Date.t -> int (* returns number of failures from unknown clients since last call to [ get ] *) val record_unknown : unit -> int @@ -159,7 +159,7 @@ end = struct ctr ) - type value = {num_failed_attempts: int; last_failed_attempt: Date.iso8601} + type value = {num_failed_attempts: int; last_failed_attempt: Date.t} let table = Hashtbl.create 10 @@ -247,7 +247,7 @@ let _record_login_failure ~__context ~now ~uname ~originator ~record f = let record_login_failure ~__context ~uname ~originator ~record f = Context.with_tracing ?originator ~__context __FUNCTION__ @@ fun __context -> - let now = Unix.time () |> Date.of_float in + let now = Date.now () in _record_login_failure ~__context ~now ~uname ~originator ~record f let get_failed_login_stats = AuthFail.get_stats_string @@ -448,11 +448,12 @@ let revalidate_external_session ~__context ~session = (* 2. has the external session expired/does it need revalidation? 
*) let session_last_validation_time = - Date.to_float (Db.Session.get_validation_time ~__context ~self:session) + Date.to_unix_time + (Db.Session.get_validation_time ~__context ~self:session) in - let now = Unix.time () in + let now = Date.now () in let session_needs_revalidation = - now + Date.to_unix_time now > session_last_validation_time +. session_lifespan +. random_lifespan in if session_needs_revalidation then ( @@ -528,7 +529,7 @@ let revalidate_external_session ~__context ~session = (* session passed revalidation, let's update its last revalidation time *) Db.Session.set_validation_time ~__context ~self:session - ~value:(Date.of_float now) ; + ~value:now ; debug "updated validation time for session %s, sid %s " (trackid session) authenticated_user_sid ; (* let's also update the session's subject ref *) @@ -634,12 +635,11 @@ let login_no_password_common ~__context ~uname ~originator ~host ~pool (trackid session_id) pool (match uname with None -> "" | Some u -> u) originator is_local_superuser auth_user_sid (trackid parent) ; + let now = Date.now () in Db.Session.create ~__context ~ref:session_id ~uuid ~this_user:user - ~this_host:host ~pool - ~last_active:(Date.of_float (Unix.time ())) - ~other_config:[] ~subject ~is_local_superuser ~auth_user_sid - ~validation_time:(Date.of_float (Unix.time ())) - ~auth_user_name ~rbac_permissions ~parent ~originator ~client_certificate ; + ~this_host:host ~pool ~last_active:now ~other_config:[] ~subject + ~is_local_superuser ~auth_user_sid ~validation_time:now ~auth_user_name + ~rbac_permissions ~parent ~originator ~client_certificate ; if not pool then Atomic.incr total_sessions ; Ref.string_of session_id diff --git a/ocaml/xapi/xapi_session.mli b/ocaml/xapi/xapi_session.mli index 853284f3c4c..c228fc3bfc5 100644 --- a/ocaml/xapi/xapi_session.mli +++ b/ocaml/xapi/xapi_session.mli @@ -87,7 +87,7 @@ val create_from_db_file : (* for unit testing *) val _record_login_failure : __context:Context.t - -> 
now:Xapi_stdext_date.Date.iso8601 + -> now:Xapi_stdext_date.Date.t -> uname:string option -> originator:string option -> record:[< `log_only | `log_and_alert] diff --git a/ocaml/xapi/xapi_sr.ml b/ocaml/xapi/xapi_sr.ml index fc7e5dd768a..d572660e72d 100644 --- a/ocaml/xapi/xapi_sr.ml +++ b/ocaml/xapi/xapi_sr.ml @@ -682,7 +682,7 @@ let update_vdis ~__context ~sr db_vdis vdi_infos = ~current_operations:[] ~allowed_operations:[] ~is_a_snapshot:vdi.is_a_snapshot ~snapshot_of:(find_vdi db_vdi_map vdi.snapshot_of) - ~snapshot_time:(Date.of_string vdi.snapshot_time) + ~snapshot_time:(Date.of_iso8601 vdi.snapshot_time) ~sR:sr ~virtual_size:vdi.virtual_size ~physical_utilisation:vdi.physical_utilisation ~_type:(try Storage_utils.vdi_type_of_string vdi.ty with _ -> `user) @@ -735,10 +735,10 @@ let update_vdis ~__context ~sr db_vdis vdi_infos = debug "%s is_a_snapshot <- %b" (Ref.string_of r) vi.is_a_snapshot ; Db.VDI.set_is_a_snapshot ~__context ~self:r ~value:vi.is_a_snapshot ) ; - if v.API.vDI_snapshot_time <> Date.of_string vi.snapshot_time then ( + if v.API.vDI_snapshot_time <> Date.of_iso8601 vi.snapshot_time then ( debug "%s snapshot_time <- %s" (Ref.string_of r) vi.snapshot_time ; Db.VDI.set_snapshot_time ~__context ~self:r - ~value:(Date.of_string vi.snapshot_time) + ~value:(Date.of_iso8601 vi.snapshot_time) ) ; let snapshot_of = find_vdi db_vdi_map vi.snapshot_of in if v.API.vDI_snapshot_of <> snapshot_of then ( diff --git a/ocaml/xapi/xapi_vbd.ml b/ocaml/xapi/xapi_vbd.ml index 4284523e2ba..0bd805e5a26 100644 --- a/ocaml/xapi/xapi_vbd.ml +++ b/ocaml/xapi/xapi_vbd.ml @@ -260,7 +260,7 @@ let create ~__context ~vM ~vDI ~device ~userdevice ~bootable ~mode ~_type let metrics = Ref.make () and metrics_uuid = Uuidx.to_string (Uuidx.make ()) in Db.VBD_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) + ~io_read_kbs:0. ~io_write_kbs:0. 
~last_updated:Date.epoch ~other_config:[] ; (* Enable the SM driver to specify a VBD backend kind for the VDI *) let other_config = diff --git a/ocaml/xapi/xapi_vbd_helpers.ml b/ocaml/xapi/xapi_vbd_helpers.ml index 3794d2c1fb7..c5a370df137 100644 --- a/ocaml/xapi/xapi_vbd_helpers.ml +++ b/ocaml/xapi/xapi_vbd_helpers.ml @@ -433,8 +433,7 @@ let copy ~__context ?vdi ~vm vbd = let metrics_uuid = Uuidx.to_string (Uuidx.make ()) in let vdi = Option.value ~default:all.API.vBD_VDI vdi in Db.VBD_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) - ~other_config:[] ; + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:Date.epoch ~other_config:[] ; Db.VBD.create ~__context ~ref:new_vbd ~uuid:vbd_uuid ~allowed_operations:[] ~current_operations:[] ~storage_lock:false ~vM:vm ~vDI:vdi ~empty:(all.API.vBD_empty || vdi = Ref.null) diff --git a/ocaml/xapi/xapi_vdi.mli b/ocaml/xapi/xapi_vdi.mli index 8e52daf8305..ff3e5a9e0ec 100644 --- a/ocaml/xapi/xapi_vdi.mli +++ b/ocaml/xapi/xapi_vdi.mli @@ -87,7 +87,7 @@ val pool_introduce : -> physical_utilisation:int64 -> metadata_of_pool:[`pool] API.Ref.t -> is_a_snapshot:bool - -> snapshot_time:API.Date.iso8601 + -> snapshot_time:API.Date.t -> snapshot_of:[`VDI] API.Ref.t -> cbt_enabled:bool -> [`VDI] Ref.t @@ -110,7 +110,7 @@ val db_introduce : -> physical_utilisation:int64 -> metadata_of_pool:[`pool] API.Ref.t -> is_a_snapshot:bool - -> snapshot_time:API.Date.iso8601 + -> snapshot_time:API.Date.t -> snapshot_of:[`VDI] API.Ref.t -> cbt_enabled:bool -> [`VDI] Ref.t @@ -208,7 +208,7 @@ val set_snapshot_of : __context:Context.t -> self:[`VDI] API.Ref.t -> value:[`VDI] API.Ref.t -> unit val set_snapshot_time : - __context:Context.t -> self:[`VDI] API.Ref.t -> value:API.Date.iso8601 -> unit + __context:Context.t -> self:[`VDI] API.Ref.t -> value:API.Date.t -> unit val set_metadata_of_pool : __context:Context.t diff --git a/ocaml/xapi/xapi_vif_helpers.ml 
b/ocaml/xapi/xapi_vif_helpers.ml index b7fd5eadd2d..4a469b84368 100644 --- a/ocaml/xapi/xapi_vif_helpers.ml +++ b/ocaml/xapi/xapi_vif_helpers.ml @@ -298,8 +298,7 @@ let create ~__context ~device ~network ~vM ~mAC ~mTU ~other_config and metrics_uuid = Uuidx.to_string (Uuidx.make ()) in Db.VIF_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid ~io_read_kbs:0. ~io_write_kbs:0. - ~last_updated:(Xapi_stdext_date.Date.of_float 0.) - ~other_config:[] ; + ~last_updated:Xapi_stdext_date.Date.epoch ~other_config:[] ; let (_ : unit) = Db.VIF.create ~__context ~ref ~uuid:(Uuidx.to_string uuid) ~current_operations:[] ~allowed_operations:[] ~reserved:false ~device diff --git a/ocaml/xapi/xapi_vm.ml b/ocaml/xapi/xapi_vm.ml index cb5f616d323..44b55614f31 100644 --- a/ocaml/xapi/xapi_vm.ml +++ b/ocaml/xapi/xapi_vm.ml @@ -617,8 +617,8 @@ let create ~__context ~name_label ~name_description ~power_state ~user_version let current_domain_type = if suspended then domain_type else `unspecified in Db.VM_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid ~memory_actual:0L ~vCPUs_number:0L ~vCPUs_utilisation ~vCPUs_CPU:[] - ~vCPUs_params:[] ~vCPUs_flags:[] ~state:[] ~start_time:Date.never - ~install_time:Date.never ~last_updated:Date.never ~other_config:[] + ~vCPUs_params:[] ~vCPUs_flags:[] ~state:[] ~start_time:Date.epoch + ~install_time:Date.epoch ~last_updated:Date.epoch ~other_config:[] ~hvm:false ~nested_virt:false ~nomigrate:false ~current_domain_type ; let domain_type = if domain_type = `unspecified then @@ -652,7 +652,7 @@ let create ~__context ~name_label ~name_description ~power_state ~user_version ~power_state:_power_state ~allowed_operations:[] ~current_operations:[] ~blocked_operations:[] ~name_label ~name_description ~user_version ~is_a_template ~is_default_template:false ~transportable_snapshot_id:"" - ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null + ~is_a_snapshot:false ~snapshot_time:Date.epoch ~snapshot_of:Ref.null ~parent:Ref.null 
~snapshot_info:[] ~snapshot_metadata:"" ~resident_on ~scheduled_to_be_resident_on ~affinity ~memory_overhead:0L ~memory_static_max ~memory_dynamic_max ~memory_target ~memory_dynamic_min diff --git a/ocaml/xapi/xapi_vm_clone.ml b/ocaml/xapi/xapi_vm_clone.ml index 46c1e310ac2..c2a5211d250 100644 --- a/ocaml/xapi/xapi_vm_clone.ml +++ b/ocaml/xapi/xapi_vm_clone.ml @@ -343,9 +343,9 @@ let copy_vm_record ?snapshot_info_record ~__context ~vm ~disk_op ~new_name ~snapshot_of:(if is_a_snapshot then vm else Ref.null) ~snapshot_time: ( if is_a_snapshot then - Date.of_float (Unix.gettimeofday ()) + Date.now () else - Date.never + Date.epoch ) ~snapshot_info: ( match snapshot_info_record with diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml index 1c295235b3c..0387dee1952 100644 --- a/ocaml/xapi/xapi_vm_helpers.ml +++ b/ocaml/xapi/xapi_vm_helpers.ml @@ -79,9 +79,7 @@ let set_is_a_template ~__context ~self ~value = info "VM.set_is_a_template('%b')" value ; let m = Db.VM.get_metrics ~__context ~self in ( if not value then - try - Db.VM_metrics.set_install_time ~__context ~self:m - ~value:(Date.of_float (Unix.gettimeofday ())) + try Db.VM_metrics.set_install_time ~__context ~self:m ~value:(Date.now ()) with _ -> warn "Could not update VM install time because metrics object was missing" @@ -1413,19 +1411,19 @@ let copy_metrics ~__context ~vm = m ) ~start_time: - (Option.fold ~none:Date.never + (Option.fold ~none:Date.epoch ~some:(fun x -> x.Db_actions.vM_metrics_start_time) m ) ~install_time: - (Option.fold ~none:Date.never + (Option.fold ~none:Date.epoch ~some:(fun x -> x.Db_actions.vM_metrics_install_time) m ) ~state: (Option.fold ~none:[] ~some:(fun x -> x.Db_actions.vM_metrics_state) m) ~last_updated: - (Option.fold ~none:Date.never + (Option.fold ~none:Date.epoch ~some:(fun x -> x.Db_actions.vM_metrics_last_updated) m ) diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index 8208cf89880..677da6fe8f1 100644 --- 
a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -1370,10 +1370,10 @@ let migrate_send' ~__context ~vm ~dest ~live:_ ~vdi_map ~vif_map ~vgpu_map let r = Int64.compare v1.size v2.size in if r = 0 then let t1 = - Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) + Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v1.vdi) in let t2 = - Date.to_float (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) + Date.to_unix_time (Db.VDI.get_snapshot_time ~__context ~self:v2.vdi) in compare t1 t2 else diff --git a/ocaml/xapi/xapi_vmss.ml b/ocaml/xapi/xapi_vmss.ml index 03badb83b60..d4a960cae81 100644 --- a/ocaml/xapi/xapi_vmss.ml +++ b/ocaml/xapi/xapi_vmss.ml @@ -228,7 +228,7 @@ let create ~__context ~name_label ~name_description ~enabled ~_type let uuid = Uuidx.to_string (Uuidx.make ()) in Db.VMSS.create ~__context ~ref ~uuid ~name_label ~name_description ~enabled ~_type ~retained_snapshots ~frequency ~schedule - ~last_run_time:(Xapi_stdext_date.Date.of_float 0.) 
; + ~last_run_time:Xapi_stdext_date.Date.epoch ; ref let destroy_all_messages ~__context ~self = diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index 48c51740c77..f50e692a555 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -2282,14 +2282,16 @@ let update_vm ~__context id = Option.iter (fun (_, state) -> let metrics = Db.VM.get_metrics ~__context ~self in - let start_time = Date.of_float state.Vm.last_start_time in + let start_time = + Date.of_unix_time state.Vm.last_start_time + in if start_time <> Db.VM_metrics.get_start_time ~__context ~self:metrics then ( debug "xenopsd event: Updating VM %s last_start_time <- %s" id - (Date.to_string (Date.of_float state.Vm.last_start_time)) ; + Date.(to_rfc3339 (of_unix_time state.Vm.last_start_time)) ; Db.VM_metrics.set_start_time ~__context ~self:metrics ~value:start_time ; if @@ -2313,8 +2315,8 @@ let update_vm ~__context id = "VM %s guest metrics update time (%s) < VM start time \ (%s): deleting" id - (Date.to_string update_time) - (Date.to_string start_time) ; + (Date.to_rfc3339 update_time) + (Date.to_rfc3339 start_time) ; Xapi_vm_helpers.delete_guest_metrics ~__context ~self ; check_guest_agent () ) diff --git a/ocaml/xenopsd/lib/suspend_image.ml b/ocaml/xenopsd/lib/suspend_image.ml index 029224e7b06..e08cb53c268 100644 --- a/ocaml/xenopsd/lib/suspend_image.ml +++ b/ocaml/xenopsd/lib/suspend_image.ml @@ -42,7 +42,7 @@ module Xenops_record = struct [@@deriving sexp] let make ?vm_str ?xs_subtree () = - let time = Xapi_stdext_date.Date.(to_string (now ())) in + let time = Xapi_stdext_date.Date.(to_rfc3339 (now ())) in let word_size = Sys.word_size in {word_size; time; vm_str; xs_subtree} diff --git a/ocaml/xenopsd/lib/xenops_server.ml b/ocaml/xenopsd/lib/xenops_server.ml index 71ad563ed19..e65b929e1f4 100644 --- a/ocaml/xenopsd/lib/xenops_server.ml +++ b/ocaml/xenopsd/lib/xenops_server.ml @@ -1152,7 +1152,7 @@ module WorkerPool = struct let t' = Xenops_task.to_interface_task t 
in { id= t'.Task.id - ; ctime= t'.Task.ctime |> Date.of_float |> Date.to_string + ; ctime= t'.Task.ctime |> Date.of_unix_time |> Date.to_rfc3339 ; dbg= t'.Task.dbg ; subtasks= List.map diff --git a/ocaml/xenopsd/xc/memory_breakdown.ml b/ocaml/xenopsd/xc/memory_breakdown.ml index fae014a6ce3..54e739fe9aa 100644 --- a/ocaml/xenopsd/xc/memory_breakdown.ml +++ b/ocaml/xenopsd/xc/memory_breakdown.ml @@ -109,7 +109,7 @@ let xs_read_bytes_from_kib_key xs path = (** {2 Host fields} *) -let host_time _ = Date.to_string (Date.of_float (Unix.gettimeofday ())) +let host_time _ = Date.(to_rfc3339 (now ())) let host_total_bytes h = Int64.to_string diff --git a/ocaml/xenopsd/xc/memory_summary.ml b/ocaml/xenopsd/xc/memory_summary.ml index c63e495ccb4..e2abb80264d 100644 --- a/ocaml/xenopsd/xc/memory_summary.ml +++ b/ocaml/xenopsd/xc/memory_summary.ml @@ -55,7 +55,7 @@ let _ = in if not !hash then ( Printf.printf "%s %Ld %Ld" - (Date.to_string (Date.of_float (Unix.gettimeofday ()))) + Date.(to_rfc3339 (now ())) (total_pages ** one_page) (free_pages ** one_page) ; let domains = List.stable_sort (fun (a, _) (b, _) -> compare a b) domains From 7c28696a0c5b2b4fd9ec486146051f8c96e29597 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 10 Jul 2024 12:38:25 +0100 Subject: [PATCH 315/341] Clock: Use integers for date's timezone offsets Previously the comments and code talked about "timezone printing", but that is only a consequence of a more fundamental problem: datetimes without timezone do not share a frame of reference with datetimes that have a timezone, nor among themselves. This means that timezoneless shouldn't be compared with other timezones as assuming a timezone can be incorrect. Datetimes with timezone all share a frame of reference: the unix epoch. Now the code uses the exact offset in seconds, which also enables it to accept datetimes that have an offset different than 0. Comparisons with timezoneless datetimes are not forbidden for the time being. 
Instead UTC is assumed, like before, even if it might be the wrong guess. Signed-off-by: Pau Ruiz Safont --- ocaml/libs/clock/date.ml | 88 +++++++++++++++++-------------- ocaml/libs/clock/test_date.ml | 19 ++++--- ocaml/quicktest/quicktest_date.ml | 1 + 3 files changed, 62 insertions(+), 46 deletions(-) diff --git a/ocaml/libs/clock/date.ml b/ocaml/libs/clock/date.ml index a4a43cde623..a27c6b505ca 100644 --- a/ocaml/libs/clock/date.ml +++ b/ocaml/libs/clock/date.ml @@ -28,22 +28,24 @@ let months = let days = [|"Sun"; "Mon"; "Tue"; "Wed"; "Thu"; "Fri"; "Sat"|] -type print_timezone = Empty | TZ of string +(* iso8601 allows datetimes to not contain any timezone information. + Unfortunately we need to maintain this information because this means that + the timestamp cannot be converted back to a timestamp with UTC as a + reference. When serializing timezoneless timestamps, the timezone must be + avoided yet again. *) +type tz = int option -(* we must store the print_type with iso8601 to handle the case where the local time zone is UTC *) -type t = Ptime.date * Ptime.time * print_timezone +type t = Ptime.date * Ptime.time * tz -let utc = TZ "Z" +let utc = Some 0 -let of_dt print_type dt = +let of_dt tz dt = let date, time = dt in - (date, time, print_type) + (date, time, tz) let to_dt (date, time, _) = (date, time) let best_effort_iso8601_to_rfc3339 x = - (* (a) add dashes - * (b) add UTC tz if no tz provided *) let x = try Scanf.sscanf x "%04d%02d%02dT%s" (fun y mon d rest -> @@ -60,30 +62,40 @@ let best_effort_iso8601_to_rfc3339 x = in match tz with | None | Some "" -> - (* the caller didn't specify a tz. 
we must try to add one so that ptime can at least attempt to parse *) - (Printf.sprintf "%sZ" x, Empty) - | Some tz -> - (x, TZ tz) + (* the caller didn't specify a tz, use the Unqualified Local Time *) + Printf.sprintf "%s-00:00" x + | Some _ -> + x let of_iso8601 x = - let rfc3339, print_timezone = best_effort_iso8601_to_rfc3339 x in + let rfc3339 = best_effort_iso8601_to_rfc3339 x in match Ptime.of_rfc3339 rfc3339 |> Ptime.rfc3339_error_to_msg with | Error _ -> invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x) - | Ok (t, tz, _) -> ( - match tz with - | None | Some 0 -> - Ptime.to_date_time t |> of_dt print_timezone - | Some _ -> - invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x) - ) - -let to_rfc3339 ((y, mon, d), ((h, min, s), _), print_type) = - match print_type with - | TZ tz -> - Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i%s" y mon d h min s tz - | Empty -> - Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i" y mon d h min s + | Ok (t, None, _) -> + Ptime.to_date_time t |> of_dt None + | Ok (t, Some tz, _) -> + Ptime.to_date_time ~tz_offset_s:tz t |> of_dt (Some tz) + +let print_tz tz_s = + match tz_s with + | None -> + "" + | Some 0 -> + "Z" + | Some tz -> + let tz_sign = if tz < 0 then '-' else '+' in + let all_tz_minutes = tz / 60 |> Int.abs in + let tz_h = all_tz_minutes / 60 in + let tz_min = all_tz_minutes mod 60 in + Printf.sprintf "%c%02d:%02d" tz_sign tz_h tz_min + +let to_rfc3339 ((y, mon, d), ((h, min, s), _), tz) = + (* Must be compatible with iso8601 as well. 
Because some client limitations, + the hyphens between year, month and day have to be absent + *) + let tz = print_tz tz in + Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i%s" y mon d h min s tz (* Extracted from tondering.dk/claus/cal/chrweek.php#calcdow *) let weekday ~year ~mon ~day = @@ -92,10 +104,8 @@ let weekday ~year ~mon ~day = let m = mon + (12 * a) - 2 in (day + y + (y / 4) - (y / 100) + (y / 400) + (31 * m / 12)) mod 7 -let to_rfc822 ((year, mon, day), ((h, min, s), _), print_type) = - let timezone = - match print_type with Empty | TZ "Z" -> "GMT" | TZ tz -> tz - in +let to_rfc822 ((year, mon, day), ((h, min, s), _), tz) = + let timezone = match print_tz tz with "Z" -> "GMT" | tz -> tz in let weekday = weekday ~year ~mon ~day in Printf.sprintf "%s, %d %s %d %02d:%02d:%02d %s" days.(weekday) day months.(mon - 1) @@ -127,7 +137,7 @@ let to_unix_time t = to_ptime_t t |> Ptime.to_float_s let _localtime current_tz_offset t = let tz_offset_s = current_tz_offset |> Option.value ~default:0 in - let localtime = t |> Ptime.to_date_time ~tz_offset_s |> of_dt Empty in + let localtime = t |> Ptime.to_date_time ~tz_offset_s |> of_dt None in let _, (_, localtime_offset), _ = localtime in if localtime_offset <> tz_offset_s then invalid_arg @@ -152,19 +162,19 @@ let is_later ~than t = Ptime.is_later ~than:(to_ptime than) (to_ptime t) let diff a b = Ptime.diff (to_ptime a) (to_ptime b) -let compare_print_tz a b = +let compare_tz a b = match (a, b) with - | Empty, Empty -> + | None, None -> 0 - | TZ a_s, TZ b_s -> - String.compare a_s b_s - | Empty, TZ _ -> + | Some a_s, Some b_s -> + Int.compare a_s b_s + | None, Some _ -> -1 - | TZ _, Empty -> + | Some _, None -> 1 let compare ((_, _, a_z) as a) ((_, _, b_z) as b) = let ( ) a b = if a = 0 then b else a in - Ptime.compare (to_ptime a) (to_ptime b) compare_print_tz a_z b_z + Ptime.compare (to_ptime a) (to_ptime b) compare_tz a_z b_z let eq x y = compare x y = 0 diff --git a/ocaml/libs/clock/test_date.ml 
b/ocaml/libs/clock/test_date.ml index 78f673f635c..ccf7517e6fa 100644 --- a/ocaml/libs/clock/test_date.ml +++ b/ocaml/libs/clock/test_date.ml @@ -20,17 +20,21 @@ let tests = @@ eq (time |> of_unix_time) (time |> of_unix_time |> to_unix_time |> of_unix_time) in - let test_only_utc () = + let test_iso8601 () = let utc = "2020-12-20T18:10:19Z" in let _ = of_iso8601 utc in (* UTC is valid *) let non_utc = "2020-12-20T18:10:19+02:00" in - let exn = - Invalid_argument "Clock__Date.of_iso8601: 2020-12-20T18:10:19+02:00" + let _ = of_iso8601 non_utc in + () + in + let test_roundtrip_conversion () = + let non_utc = ["20201220T18:10:19+02:00"; "20201220T18:10:19-08:45"] in + let test spec = + let result = spec |> of_iso8601 |> to_rfc3339 in + Alcotest.(check string) "Roundtrip conversion be consistent" spec result in - Alcotest.check_raises "only UTC is accepted" exn (fun () -> - of_iso8601 non_utc |> ignore - ) + List.iter test non_utc in let test_ca333908 () = check_float "dash time and no dash time represent the same unix timestamp" @@ -113,7 +117,8 @@ let tests = in [ ("test_of_unix_time_invertible", `Quick, test_of_unix_time_invertible) - ; ("test_only_utc", `Quick, test_only_utc) + ; ("test_only_utc", `Quick, test_iso8601) + ; ("Roundtrip conversion", `Quick, test_roundtrip_conversion) ; ("test_ca333908", `Quick, test_ca333908) ; ( "test_of_iso8601_invertible_when_no_dashes" , `Quick diff --git a/ocaml/quicktest/quicktest_date.ml b/ocaml/quicktest/quicktest_date.ml index 35aec799cd4..19ff9153088 100644 --- a/ocaml/quicktest/quicktest_date.ml +++ b/ocaml/quicktest/quicktest_date.ml @@ -23,6 +23,7 @@ let test_message_get_since rpc session_id () = ; "%Y-%m-%dT%H:%M:%S" ; "%Y%m%dT%H:%M:%SZ" ; "%Y%m%dT%H:%M:%S" + ; "%Y%m%dT%H:%M:%S-12:00" ] |> List.iter test_with_format From 11c222da5912b1137259ec43fc1b187f3585dd25 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 12 Sep 2024 15:13:16 +0100 Subject: [PATCH 316/341] datamodel_lifecycle: bump to latest version 
Signed-off-by: Pau Ruiz Safont --- ocaml/idl/datamodel_lifecycle.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/idl/datamodel_lifecycle.ml b/ocaml/idl/datamodel_lifecycle.ml index fd2dee68944..bcd67b50acb 100644 --- a/ocaml/idl/datamodel_lifecycle.ml +++ b/ocaml/idl/datamodel_lifecycle.ml @@ -68,7 +68,7 @@ let prototyped_of_field = function | "host", "last_software_update" -> Some "22.20.0" | "VM_guest_metrics", "netbios_name" -> - Some "24.27.0-next" + Some "24.28.0" | "VM", "groups" -> Some "24.19.1" | "VM", "pending_guidances_full" -> From 8c30988cf189af21d73cc3f284028861d3ed8d4b Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 12 Sep 2024 15:06:01 +0100 Subject: [PATCH 317/341] CA-399187: Allow gencert to be called without groupid Last year, an argument to determine the group id of the certificates created was added. This broke test clients, and there's no need no make it compulsory since there's a default value: -1. Modify the binary so it can accept only 3 arguments like before, and change the help output to mention the group id. 
Signed-off-by: Pau Ruiz Safont --- ocaml/gencert/gencert.ml | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/ocaml/gencert/gencert.ml b/ocaml/gencert/gencert.ml index 695002ebb67..0d3284379ff 100644 --- a/ocaml/gencert/gencert.ml +++ b/ocaml/gencert/gencert.ml @@ -76,19 +76,34 @@ let () = let program_name = Sys.argv.(0) in let dbg = Printf.sprintf "%s - %f" program_name (Unix.gettimeofday ()) in (* if necessary use Unix.localtime to debug *) - D.debug "%s" dbg ; - match Sys.argv with - | [|_; path; _; _|] when Sys.file_exists path -> - D.info "file already exists at path (%s) - doing nothing" path ; - exit 0 - | [|_; path; cert_gid; sni|] -> ( + let sni_or_exit sni = match SNI.of_string sni with | Some sni -> - main ~dbg ~path ~cert_gid:(int_of_string cert_gid) ~sni () + sni | None -> D.error "SNI must be default or xapi:pool, but got '%s'" sni ; exit 1 - ) + in + let gid_or_exit gid = + match int_of_string_opt gid with + | Some gid -> + gid + | None -> + D.error "GROUPID must be an integer, but got '%s'" gid ; + exit 1 + in + D.debug "%s" dbg ; + match Sys.argv with + | ([|_; path; _|] | [|_; path; _; _|]) when Sys.file_exists path -> + D.info "file already exists at path (%s) - doing nothing" path ; + exit 0 + | [|_; path; sni|] -> + let sni = sni_or_exit sni in + main ~dbg ~path ~cert_gid:(-1) ~sni () + | [|_; path; cert_gid; sni|] -> + let sni = sni_or_exit sni in + let cert_gid = gid_or_exit cert_gid in + main ~dbg ~path ~cert_gid ~sni () | _ -> - D.error "Usage: %s PATH (default|xapi:pool)" program_name ; + D.error "Usage: %s PATH [GROUPID] (default|xapi:pool)" program_name ; exit 1 From f8a5f49942969639f1534048b631224d80ada7da Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Fri, 13 Sep 2024 17:30:56 +0100 Subject: [PATCH 318/341] CP-47509: Revisited the setting of response headers to avoid errors when multiple threads use the same session object. 
Signed-off-by: Konstantina Chremmou Signed-off-by: Danilo Del Busso --- ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs b/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs index 71c9ea81f4c..519cc430d4e 100644 --- a/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs +++ b/ocaml/sdk-gen/csharp/autogen/src/JsonRpc.cs @@ -304,15 +304,17 @@ protected virtual void PerformPostRequest(Stream postStream, Stream responseStre { webResponse = (HttpWebResponse)webRequest.GetResponse(); - ResponseHeaders = new Dictionary(); + var newResponseHeaders = new Dictionary(); if (webResponse.Headers != null) { var keys = webResponse.Headers.AllKeys; foreach (var key in keys) - ResponseHeaders.Add(key, string.Join(",", webResponse.Headers.Get(key))); + newResponseHeaders.Add(key, string.Join(",", webResponse.Headers.Get(key))); } + ResponseHeaders = newResponseHeaders; + if (webResponse.StatusCode != HttpStatusCode.OK) throw new WebException(webResponse.StatusCode.ToString()); From ada0dc7e99bd2c8b302af235af7bc0fd6d4019e5 Mon Sep 17 00:00:00 2001 From: Christian Lindig Date: Mon, 2 Sep 2024 15:50:46 +0100 Subject: [PATCH 319/341] CA-397599 XSI-1704 implement setter for blocked ops manually Currently the setter for field VM.blocked_oeprations is auto generated. Implement this explicitly such that we can update allowed operations which currently is not happening. Allowed operations are used by XenCenter to disable operations; this has led to operations becoming unavaliable because allowed operations where not aligned with blocked operations when blocked operations were updated. 
Signed-off-by: Christian Lindig --- ocaml/idl/datamodel_vm.ml | 42 +++++++++++++++++++++++++++++++- ocaml/xapi/message_forwarding.ml | 17 +++++++++++++ ocaml/xapi/xapi_vm.ml | 12 +++++++++ ocaml/xapi/xapi_vm.mli | 16 ++++++++++++ 4 files changed, 86 insertions(+), 1 deletion(-) diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index bf6fe168f8a..96939c4d5e2 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -1628,6 +1628,43 @@ let operations = ] ) +let set_blocked_operations = + call ~name:"set_blocked_operations" + ~in_product_since:rel_orlando (* but updated 2024 *) + ~doc: + "Update list of operations which have been explicitly blocked and an \ + error code" + ~params: + [ + (Ref _vm, "self", "The VM") + ; (Map (operations, String), "value", "Blocked operations") + ] + ~allowed_roles:_R_VM_ADMIN () + +let add_to_blocked_operations = + call ~name:"add_to_blocked_operations" + ~in_product_since:rel_orlando (* but updated 2024 *) + ~doc: + "Update list of operations which have been explicitly blocked and an \ + error code" + ~params: + [ + (Ref _vm, "self", "The VM") + ; (operations, "key", "Blocked operation") + ; (String, "value", "Error code") + ] + ~allowed_roles:_R_VM_ADMIN () + +let remove_from_blocked_operations = + call ~name:"remove_from_blocked_operations" + ~in_product_since:rel_orlando (* but updated 2024 *) + ~doc: + "Update list of operations which have been explicitly blocked and an \ + error code" + ~params: + [(Ref _vm, "self", "The VM"); (operations, "key", "Blocked operation")] + ~allowed_roles:_R_VM_ADMIN () + let assert_operation_valid = call ~in_oss_since:None ~in_product_since:rel_rio ~name:"assert_operation_valid" @@ -1909,6 +1946,9 @@ let t = ; restart_device_models ; set_uefi_mode ; get_secureboot_readiness + ; set_blocked_operations + ; add_to_blocked_operations + ; remove_from_blocked_operations ] ~contents: ([uid _vm] @@ -2086,7 +2126,7 @@ let t = ~default_value:(Some (VSet [])) ~ty:(Set String) 
"tags" "user-specified tags for categorization purposes" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) - ~qualifier:RW + ~qualifier:StaticRO ~ty:(Map (operations, String)) "blocked_operations" "List of operations which have been explicitly blocked and an \ diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index 2ba1139de32..6ed0bc04d85 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -3097,6 +3097,23 @@ functor let get_secureboot_readiness ~__context ~self = info "VM.get_secureboot_readiness: self = '%s'" (vm_uuid ~__context self) ; Local.VM.get_secureboot_readiness ~__context ~self + + let set_blocked_operations ~__context ~self ~value = + info "VM.set_blocked_operations: self = '%s'" (vm_uuid ~__context self) ; + Local.VM.set_blocked_operations ~__context ~self ~value ; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self + + let add_to_blocked_operations ~__context ~self ~key ~value = + info "VM.add_to_blocked_operations: self = '%s'" + (vm_uuid ~__context self) ; + Local.VM.add_to_blocked_operations ~__context ~self ~key ~value ; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self + + let remove_from_blocked_operations ~__context ~self ~key = + info "VM.remove_from_blocked_operations: self = '%s'" + (vm_uuid ~__context self) ; + Local.VM.remove_from_blocked_operations ~__context ~self ~key ; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self end module VM_metrics = struct end diff --git a/ocaml/xapi/xapi_vm.ml b/ocaml/xapi/xapi_vm.ml index cb5f616d323..3e54a277592 100644 --- a/ocaml/xapi/xapi_vm.ml +++ b/ocaml/xapi/xapi_vm.ml @@ -1601,6 +1601,18 @@ let set_domain_type ~__context ~self ~value = Db.VM.set_HVM_boot_policy ~__context ~self ~value:(derive_hvm_boot_policy ~domain_type:value) +let set_blocked_operations ~__context ~self ~value = + debug "%s" __FUNCTION__ ; + Db.VM.set_blocked_operations ~__context ~self ~value + +let 
add_to_blocked_operations ~__context ~self ~key ~value = + debug "%s" __FUNCTION__ ; + Db.VM.add_to_blocked_operations ~__context ~self ~key ~value + +let remove_from_blocked_operations ~__context ~self ~key = + debug "%s" __FUNCTION__ ; + Db.VM.remove_from_blocked_operations ~__context ~self ~key + let set_HVM_boot_policy ~__context ~self ~value = Db.VM.set_domain_type ~__context ~self ~value:(derive_domain_type ~hVM_boot_policy:value) ; diff --git a/ocaml/xapi/xapi_vm.mli b/ocaml/xapi/xapi_vm.mli index 19a737755e0..d0771c49cfa 100644 --- a/ocaml/xapi/xapi_vm.mli +++ b/ocaml/xapi/xapi_vm.mli @@ -428,3 +428,19 @@ val set_uefi_mode : val get_secureboot_readiness : __context:Context.t -> self:API.ref_VM -> API.vm_secureboot_readiness + +val set_blocked_operations : + __context:Context.t + -> self:API.ref_VM + -> value:(API.vm_operations * string) list + -> unit + +val add_to_blocked_operations : + __context:Context.t + -> self:API.ref_VM + -> key:API.vm_operations + -> value:string + -> unit + +val remove_from_blocked_operations : + __context:Context.t -> self:API.ref_VM -> key:API.vm_operations -> unit From cd1a2dc109fcdd6817c7e55872fd19104e1f4129 Mon Sep 17 00:00:00 2001 From: Christian Lindig Date: Thu, 12 Sep 2024 13:19:11 +0100 Subject: [PATCH 320/341] CA-397599 XSI-1704 update schema hash Signed-off-by: Christian Lindig --- ocaml/idl/schematest.ml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ocaml/idl/schematest.ml b/ocaml/idl/schematest.ml index c375a909149..0afe0a10be1 100644 --- a/ocaml/idl/schematest.ml +++ b/ocaml/idl/schematest.ml @@ -3,7 +3,7 @@ let hash x = Digest.string x |> Digest.to_hex (* BEWARE: if this changes, check that schema has been bumped accordingly in ocaml/idl/datamodel_common.ml, usually schema_minor_vsn *) -let last_known_schema_hash = "428caff23cdb969c59a9960beefd7bb6" +let last_known_schema_hash = "60590fa3fa2f8af66d9bf3c50b7bacc2" let current_schema_hash : string = let open Datamodel_types in @@ -19,11 
+19,10 @@ let () = if last_known_schema_hash <> current_schema_hash then ( Printf.eprintf {| - New schema hash ('%s') doesn't match the last known one. Please bump the -datamodel schema versions if necessary, and update 'last_known_schema_hash'. - +datamodel schema versions if necessary, and update 'last_known_schema_hash' +in file %s. |} - current_schema_hash ; + current_schema_hash __FILE__ ; exit 1 ) From 72cae2a77a030793996a3ac5687e7a7da4d620af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 27 Aug 2024 14:25:51 +0100 Subject: [PATCH 321/341] CP-50614: Add benchmark for Tracing module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The overhead is on the order of 10ns when disabled, but substantially higher when enabled. Signed-off-by: Edwin Török --- ocaml/tests/bench/bechamel_simple_cli.ml | 153 +++++++++++++++++++++++ ocaml/tests/bench/bench_tracing.ml | 87 +++++++++++++ ocaml/tests/bench/dune | 4 + 3 files changed, 244 insertions(+) create mode 100644 ocaml/tests/bench/bechamel_simple_cli.ml create mode 100644 ocaml/tests/bench/bench_tracing.ml create mode 100644 ocaml/tests/bench/dune diff --git a/ocaml/tests/bench/bechamel_simple_cli.ml b/ocaml/tests/bench/bechamel_simple_cli.ml new file mode 100644 index 00000000000..e40399cf04d --- /dev/null +++ b/ocaml/tests/bench/bechamel_simple_cli.ml @@ -0,0 +1,153 @@ +open Bechamel +open Toolkit + +(* Bechamel doesn't provide before/after hooks, just allocate/free, but those are done outside the place where + Bechamel checks for GC live words stabilization. 
+*) +let before_after ~before ~after ~get ~label ~unit = + let shared_state = Atomic.make None and called = Atomic.make 0 in + let module BeforeAfter = struct + type witness = int Atomic.t + + let make () = Atomic.make 0 + + let load t = Atomic.set t 0 + + let unload _ = () + + let label _ = label + + let unit _ = unit + + let get _ = + (* + We get added to the instances both at the beginning and the end, so we get called 4 times: + + get () - 0: None -> state := before () + time () + get () - 1 + + benchmark_loop () + + get () - 2 + time () + get () - 3, after state, state := None + + We want the time measurement to be as close to the benchmark loop as possible, + so we perform operations only on call 1 and 4 + *) + let phase = Atomic.fetch_and_add called 1 mod 4 in + let old = Atomic.get shared_state in + match (old, phase) with + | None, 0 -> + before () |> Option.some |> Atomic.set shared_state ; + 0. + | Some state, (1 | 2) -> + get state + | Some state, 3 -> + let r = get state in + Atomic.set shared_state None ; + after state ; + r + | None, _ -> + assert false + | Some _, _ -> + assert false + end in + let measure = Measure.register (module BeforeAfter) in + Measure.instance (module BeforeAfter) measure + +let skip_label = "workload" + +let thread_workload ~before ~run ~after = + let before () = + let state = before () + and stop = Atomic.make false + and loops = Atomic.make 0 in + let thread_worker () = + while not (Atomic.get stop) do + Sys.opaque_identity (run state : unit) ; + Atomic.incr loops + done + in + let t = Thread.create thread_worker () in + (state, stop, loops, t) + and after (state, stop, _loops, worker) = + Atomic.set stop true ; Thread.join worker ; after state + and get (_, _, loops, _) = Atomic.fetch_and_add loops 1 |> float_of_int in + before_after ~before ~after ~get ~label:skip_label ~unit:"loops" + +(* based on bechamel example code *) + +(* For very short benchmarks ensure that they get to run long enough to switch threads + a few 
times. + Bechamel has both an iteration count and time limit, so this won't be a problem for slower benchmarks. +*) +let limit = 10_000_000 + +let benchmark ~instances tests = + let cfg = Benchmark.cfg ~limit ~quota:(Time.second 10.0) () in + Benchmark.all cfg instances tests + +let analyze ~instances raw_results = + let ols ~bootstrap = + Analyze.ols ~bootstrap ~r_square:true ~predictors:[|Measure.run|] + in + let results = + List.map + (fun instance -> + let f bootstrap = Analyze.all (ols ~bootstrap) instance raw_results in + try f 3000 with _ -> f 0 + ) + instances + in + (Analyze.merge (ols ~bootstrap:3000) instances results, raw_results) + +open Notty_unix + +let img (window, results) = + Bechamel_notty.Multiple.image_of_ols_results ~rect:window + ~predictor:Measure.run results + |> eol + +let not_workload measure = not (Measure.label measure = skip_label) + +let run_and_print instances tests = + let results, _ = + tests + |> benchmark ~instances + |> analyze ~instances:(List.filter not_workload instances) + in + let window = + match winsize Unix.stdout with + | Some (w, h) -> + {Bechamel_notty.w; h} + | None -> + {Bechamel_notty.w= 80; h= 1} + in + img (window, results) |> eol |> output_image ; + results + |> Hashtbl.iter @@ fun label results -> + if label = Measure.label Instance.monotonic_clock then + let units = Bechamel_notty.Unit.unit_of_label label in + results + |> Hashtbl.iter @@ fun name ols -> + Format.printf "%s (%s):@, %a@." name units Analyze.OLS.pp ols + +let cli ?(always = []) ?(workloads = []) tests = + let instances = + always + @ Instance.[monotonic_clock; minor_allocated; major_allocated] + @ always + in + List.iter (fun i -> Bechamel_notty.Unit.add i (Measure.unit i)) instances ; + Format.printf "@,Running benchmarks (no workloads)@." ; + run_and_print instances tests ; + + if workloads <> [] then ( + Format.printf "@,Running benchmarks (workloads)@." 
; + List.iter (fun i -> Bechamel_notty.Unit.add i (Measure.unit i)) workloads ; + (* workloads come first, so that we unpause them in time *) + let instances = workloads @ instances @ workloads in + run_and_print instances tests + ) diff --git a/ocaml/tests/bench/bench_tracing.ml b/ocaml/tests/bench/bench_tracing.ml new file mode 100644 index 00000000000..eebe6e6aef2 --- /dev/null +++ b/ocaml/tests/bench/bench_tracing.ml @@ -0,0 +1,87 @@ +open Bechamel + +let ( let@ ) f x = f x + +(* TODO: before *) + +let trace_test_inner span = + let@ span = + Tracing.with_child_trace + ~attributes:[("foo", "testing")] + span ~name:__FUNCTION__ + in + let@ _ = + Tracing.with_child_trace ~attributes:[("bar", "val")] span ~name:"test" + in + Sys.opaque_identity ignore () + +let trace_test_span _ = Tracing.with_tracing ~name:__FUNCTION__ trace_test_inner + +let trace_test_off _ = trace_test_inner None + +let uuid = "TEST" + +let export_thread = + (* need to ensure this isn't running outside the benchmarked section, + or bechamel might fail with 'Failed to stabilize GC' + *) + let after _ = Tracing_export.flush_and_exit () in + Bechamel_simple_cli.thread_workload ~before:Tracing_export.main ~after + ~run:ignore + +let workload1 = + Bechamel_simple_cli.thread_workload ~before:ignore ~after:ignore + ~run:trace_test_span + +let create_gc_work = + let a = Array.make 1_000 "" in + fun () -> + (* create work for the GC by continously creating a lot of short lived strings *) + Sys.opaque_identity (Array.iteri (fun i _ -> a.(i) <- String.make 2 'x') a) + +let workload2 = + Bechamel_simple_cli.thread_workload ~before:ignore ~after:ignore + ~run:create_gc_work + +let workloads = [workload1; workload2] + +let allocate () = + Tracing.TracerProvider.create ~enabled:true ~attributes:[] ~endpoints:[] + ~name_label:__MODULE__ ~uuid ; + Tracing_export.main () + +let free t = + Tracing.TracerProvider.destroy ~uuid ; + Tracing_export.flush_and_exit () ; + Thread.join t + +let test_tracing_on ?(overflow 
= false) ~name f = + let allocate () = + if overflow then ( + Tracing.Spans.set_max_spans 10 ; + Tracing.Spans.set_max_traces 10 + ) ; + allocate () + and free t = + if overflow then ( + Tracing.Spans.set_max_spans Bechamel_simple_cli.limit ; + Tracing.Spans.set_max_traces Bechamel_simple_cli.limit + ) ; + free t + in + Test.make_with_resource ~name ~allocate ~free Test.uniq f + +let benchmarks = + Tracing.Spans.set_max_spans Bechamel_simple_cli.limit ; + Tracing.Spans.set_max_traces Bechamel_simple_cli.limit ; + Test.make_grouped ~name:"tracing" + [ + Test.make ~name:"overhead(off)" (Staged.stage trace_test_off) + ; test_tracing_on ~name:"overhead(on, no span)" (Staged.stage trace_test_off) + ; test_tracing_on ~name:"overhead(on, create span)" + (Staged.stage trace_test_span) + ; test_tracing_on ~overflow:true ~name:"max span overflow" + (Staged.stage trace_test_span) + ] + +let () = Bechamel_simple_cli.cli ~always:[export_thread] ~workloads benchmarks diff --git a/ocaml/tests/bench/dune b/ocaml/tests/bench/dune new file mode 100644 index 00000000000..0d11700e285 --- /dev/null +++ b/ocaml/tests/bench/dune @@ -0,0 +1,4 @@ +(executable + (name bench_tracing) + (libraries tracing bechamel bechamel-notty notty.unix tracing_export threads.posix fmt notty) +) From 4a1cd13ed0aae5f30302b9d3455fafdbe398de98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 28 Aug 2024 10:42:31 +0100 Subject: [PATCH 322/341] CP-50614: Tracing: reduce overhead when max spans is hit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When max spans or max traces is hit we log debug messages at very high rate. Limit how many failures we log, and reset the failure limit every time we've cleared the finished spans table by exporting. 
Don't call the debug function at all when the counter is too high, this avoid s the overhead of formatting the string too (we should really use the Logs module which already knows how to efficiently disable formatting when logging is turned off). This improves performance significantly and avoids affecting benchmarks: ``` ╭─────────────────────────────────────┬───────────────────────────┬───────────────────────────┬───────────────────────────┬───────────────────────────┬───────────────────────────╮ │name │ major-allocated │ minor-allocated │ monotonic-clock │ workload │ workload │ ├─────────────────────────────────────┼───────────────────────────┼───────────────────────────┼───────────────────────────┼───────────────────────────┼───────────────────────────┤ │ tracing/max span overflow │ 0.0000 mjw/run│ 1195.7273 mnw/run│ 130818578.6364 ns/run ╭─────────────────────────────────────┬───────────────────────────┬───────────────────────────┬───────────────────────────┬───────────────────────────┬───────────────────────────╮ │name │ major-allocated │ minor-allocated │ monotonic-clock │ workload │ workload │ ├─────────────────────────────────────┼───────────────────────────┼───────────────────────────┼───────────────────────────┼───────────────────────────┼───────────────────────────┤ │ tracing/max span overflow │ 0.0000 mjw/run│ 480.7951 mnw/run│ 14724.6233 ns/run ``` Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index 22d1e942288..83fd669f207 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -18,6 +18,14 @@ open D let fail fmt = Printf.ksprintf failwith fmt +let failures = Atomic.make 0 + +let not_throttled () = + let old = Atomic.fetch_and_add failures 1 in + old < 2 + +let reset_throttled () = Atomic.set failures 0 + module W3CBaggage = struct module Key = struct let 
is_valid_key str = @@ -340,13 +348,13 @@ module Spans = struct | None -> if Hashtbl.length spans < Atomic.get max_traces then Hashtbl.add spans key [span] - else + else if not_throttled () then debug "%s exceeded max traces when adding to span table" __FUNCTION__ | Some span_list -> if List.length span_list < Atomic.get max_spans then Hashtbl.replace spans key (span :: span_list) - else + else if not_throttled () then debug "%s exceeded max traces when adding to span table" __FUNCTION__ ) @@ -356,7 +364,8 @@ module Spans = struct Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> match Hashtbl.find_opt spans key with | None -> - debug "%s span does not exist or already finished" __FUNCTION__ ; + if not_throttled () then + debug "%s span does not exist or already finished" __FUNCTION__ ; None | Some span_list -> ( match @@ -377,13 +386,13 @@ module Spans = struct | None -> if Hashtbl.length finished_spans < Atomic.get max_traces then Hashtbl.add finished_spans key [span] - else + else if not_throttled () then debug "%s exceeded max traces when adding to finished span table" __FUNCTION__ | Some span_list -> if List.length span_list < Atomic.get max_spans then Hashtbl.replace finished_spans key (span :: span_list) - else + else if not_throttled () then debug "%s exceeded max traces when adding to finished span table" __FUNCTION__ ) @@ -408,6 +417,7 @@ module Spans = struct Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> let copy = Hashtbl.copy finished_spans in Hashtbl.clear finished_spans ; + reset_throttled () ; copy ) @@ -435,8 +445,10 @@ module Spans = struct Unix.gettimeofday () -. span.Span.begin_time in if elapsed > Atomic.get span_timeout *. 1000000. 
then ( - debug "Tracing: Span %s timed out, forcibly finishing now" - span.Span.context.span_id ; + if not_throttled () then + debug + "Tracing: Span %s timed out, forcibly finishing now" + span.Span.context.span_id ; let span = Span.finish ~span ~attributes: From dc7c5191efc1537fd0cef787db4de6acd9f4a7a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 27 Aug 2024 16:30:25 +0100 Subject: [PATCH 323/341] CP-50614: tracing: make Tracer.t equivalent to TracerProvider.t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 'name' field was unused, and Tracer.t would otherwise be equivalent to TracerProvider.t. This also removes one level of indirection. It also enables further optimizations, where get_tracer could directly return a provider. Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index 83fd669f207..bd620618b1c 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -578,20 +578,16 @@ module TracerProvider = struct end module Tracer = struct - type t = {_name: string; provider: TracerProvider.t} - - let create ~name ~provider = {_name= name; provider} + type t = TracerProvider.t let no_op = - let provider : TracerProvider.t = + TracerProvider. 
{ name_label= "" ; attributes= Attributes.empty ; endpoints= [] ; enabled= false } - in - {_name= ""; provider} let get_tracer ~name = if Atomic.get observe then ( @@ -602,7 +598,7 @@ module Tracer = struct match List.find_opt TracerProvider.get_enabled providers with | Some provider -> - create ~name ~provider + provider | None -> warn "No provider found for tracing %s" name ; no_op @@ -625,15 +621,14 @@ module Tracer = struct let start ~tracer:t ?(attributes = []) ?(span_kind = SpanKind.Internal) ~name ~parent () : (Span.t option, exn) result = - (* Do not start span if the TracerProvider is diabled*) - if not t.provider.enabled then + let open TracerProvider in + (* Do not start span if the TracerProvider is disabled*) + if not t.enabled then ok_none else let attributes = Attributes.of_list attributes in let attributes = - Attributes.union - (fun _k a _b -> Some a) - attributes t.provider.attributes + Attributes.union (fun _k a _b -> Some a) attributes t.attributes in let span = Span.start ~attributes ~name ~parent ~span_kind () in Spans.add_to_spans ~span ; Ok (Some span) From 24d286d913742fa62242bdfecc87893eb1659db7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 27 Aug 2024 16:46:30 +0100 Subject: [PATCH 324/341] CP-50614: Tracing: optimize get_tracer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of fetching a list of enabled tracers from the locked hashtable everytime, look it up directly. Change the 'observer' global boolean to an Atomic.t that stores the currently enabled TracerProvider (if any, a [no_op] tracer that is disabled otherwise). Now 'Tracer.get_tracer' can run without allocating memory, and without having to take any locks. When the tracer gets created/destroyed/enabled/disabled we walk the entire list and find the current tracer, if any, and set the current tracer appropriately. 
This is O(tracerproviders), but we only support tracerproviders=0, or tracerproviders=1, and this is a rare operation, that happens on startup or the first time the tracer gets enabled. tracing/overhead(on, create span) with workloads: ``` before: 14940.958498 ns/run (confidence: 16363.887949 to 13856.836570) after: 11975.740184 ns/run (confidence: 12710.262010 to 11463.052415) ``` Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 80 +++++++++++++++-------------------- 1 file changed, 33 insertions(+), 47 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index bd620618b1c..9143ad29eef 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -94,12 +94,6 @@ let validate_attribute (key, value) = && Re.execp attribute_key_regex key && W3CBaggage.Key.is_valid_key key -let observe = Atomic.make false - -let set_observe mode = Atomic.set observe mode - -let get_observe () = Atomic.get observe - module SpanKind = struct type t = Server | Consumer | Client | Producer | Internal [@@deriving rpcty] @@ -493,6 +487,18 @@ module TracerProvider = struct ; enabled: bool } + let no_op = + { + name_label= "" + ; attributes= Attributes.empty + ; endpoints= [] + ; enabled= false + } + + let current = Atomic.make no_op + + let get_current () = Atomic.get current + let get_name_label t = t.name_label let get_attributes t = Attributes.to_assoc_list t.attributes @@ -522,7 +528,7 @@ module TracerProvider = struct might not be aware that a TracerProvider has already been created.*) error "Tracing : TracerProvider %s already exists" name_label ) ; - if enabled then set_observe true + if enabled then Atomic.set current provider ) let get_tracer_providers_unlocked () = @@ -532,6 +538,18 @@ module TracerProvider = struct Xapi_stdext_threads.Threadext.Mutex.execute lock get_tracer_providers_unlocked + let update_providers_unlocked () = + let providers = get_tracer_providers_unlocked () in + match List.find_opt (fun 
provider -> provider.enabled) providers with + | None -> + Atomic.set current no_op ; + Xapi_stdext_threads.Threadext.Mutex.execute Spans.lock (fun () -> + Hashtbl.clear Spans.spans ; + Hashtbl.clear Spans.finished_spans + ) + | Some enabled -> + Atomic.set current enabled + let set ?enabled ?attributes ?endpoints ~uuid () = let update_provider (provider : t) enabled attributes endpoints = let enabled = Option.value ~default:provider.enabled enabled in @@ -556,54 +574,22 @@ module TracerProvider = struct fail "The TracerProvider : %s does not exist" uuid in Hashtbl.replace tracer_providers uuid provider ; - if - List.for_all - (fun provider -> not provider.enabled) - (get_tracer_providers_unlocked ()) - then ( - set_observe false ; - Xapi_stdext_threads.Threadext.Mutex.execute Spans.lock (fun () -> - Hashtbl.clear Spans.spans ; - Hashtbl.clear Spans.finished_spans - ) - ) else - set_observe true + update_providers_unlocked () ) let destroy ~uuid = Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> let _ = Hashtbl.remove tracer_providers uuid in - if Hashtbl.length tracer_providers = 0 then set_observe false else () + update_providers_unlocked () ) end +let get_observe () = TracerProvider.(get_current ()).enabled + module Tracer = struct type t = TracerProvider.t - let no_op = - TracerProvider. 
- { - name_label= "" - ; attributes= Attributes.empty - ; endpoints= [] - ; enabled= false - } - - let get_tracer ~name = - if Atomic.get observe then ( - let providers = - Xapi_stdext_threads.Threadext.Mutex.execute TracerProvider.lock - TracerProvider.get_tracer_providers_unlocked - in - - match List.find_opt TracerProvider.get_enabled providers with - | Some provider -> - provider - | None -> - warn "No provider found for tracing %s" name ; - no_op - ) else - no_op + let get_tracer ~name:_ = TracerProvider.get_current () let span_of_span_context context name : Span.t = { @@ -634,7 +620,7 @@ module Tracer = struct Spans.add_to_spans ~span ; Ok (Some span) let update_span_with_parent span (parent : Span.t option) = - if Atomic.get observe then + if (TracerProvider.get_current ()).enabled then match parent with | None -> Some span @@ -686,8 +672,8 @@ let enable_span_garbage_collector ?(timeout = 86400.) () = Spans.GC.initialise_thread ~timeout let with_tracing ?(attributes = []) ?(parent = None) ~name f = - if Atomic.get observe then ( - let tracer = Tracer.get_tracer ~name in + let tracer = Tracer.get_tracer ~name in + if tracer.enabled then ( match Tracer.start ~tracer ~attributes ~name ~parent () with | Ok span -> ( try From cf7b52ee451d94c826ade071ec076586339010b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 28 Aug 2024 14:33:45 +0100 Subject: [PATCH 325/341] CP-50614: Tracing: drop unused function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 15 --------------- ocaml/libs/tracing/tracing.mli | 2 -- 2 files changed, 17 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index 9143ad29eef..aa256605c3f 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -393,19 +393,6 @@ module Spans = struct let mark_finished span = Option.iter add_to_finished 
(remove_from_spans span) - let span_is_finished x = - match x with - | None -> - false - | Some (span : Span.t) -> - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt finished_spans span.context.trace_id with - | None -> - false - | Some span_list -> - List.mem span span_list - ) - (** since copies the existing finished spans and then clears the existing spans as to only export them once *) let since () = Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> @@ -660,8 +647,6 @@ module Tracer = struct span ) - let span_is_finished x = Spans.span_is_finished x - let span_hashtbl_is_empty () = Spans.span_hashtbl_is_empty () let finished_span_hashtbl_is_empty () = diff --git a/ocaml/libs/tracing/tracing.mli b/ocaml/libs/tracing/tracing.mli index 42b700ebb51..e98f35a6a21 100644 --- a/ocaml/libs/tracing/tracing.mli +++ b/ocaml/libs/tracing/tracing.mli @@ -140,8 +140,6 @@ module Tracer : sig val finish : ?error:exn * string -> Span.t option -> (Span.t option, exn) result - val span_is_finished : Span.t option -> bool - val span_hashtbl_is_empty : unit -> bool val finished_span_hashtbl_is_empty : unit -> bool From ad7c2da1e48be50d05db8ffa2841f29e7599e097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 28 Aug 2024 14:41:35 +0100 Subject: [PATCH 326/341] CP-50614: Tracing: use a list instead of hashtbl for finished spans MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only need to traverse this once, in a direct order. 
without workload: before: 25172.588238 (confidence: 38289.717741 to 14519.713302); after: 15774.959559 ns/run (confidence: 23866.336958 to 9640.244697); with workload: before: 14940.958498 ns/run (confidence: 16363.887949 to 13856.836570) after: 11028.540184 ns/run (confidence: 11687.173016 to 10454.879814); Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 103 +++++++++++++++++++-------- ocaml/libs/tracing/tracing.mli | 35 +++++++-- ocaml/libs/tracing/tracing_export.ml | 33 +++++---- ocaml/tests/test_observer.ml | 54 ++++++++++++-- 4 files changed, 169 insertions(+), 56 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index aa256605c3f..b4c8d9ad136 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -150,18 +150,75 @@ module SpanEvent = struct type t = {name: string; time: float; attributes: string Attributes.t} end +module Trace_id : sig + type t + + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end = struct + type t = int64 * int64 + + let make () = (Random.bits64 (), Random.bits64 ()) + + let of_string s = + try Scanf.sscanf s "%016Lx%016Lx" (fun a b -> (a, b)) + with e -> + D.debug "Failed to parse trace id %s: %s" s (Printexc.to_string e) ; + (* don't cause XAPI to fail *) + (0L, 0L) + + let to_string (a, b) = Printf.sprintf "%016Lx%016Lx" a b + + let compare (a1, a2) (b1, b2) = + match Int64.compare a1 b1 with 0 -> Int64.compare a2 b2 | n -> n +end + +module Span_id : sig + type t + + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end = struct + type t = int64 + + let make = Random.bits64 + + let of_string s = Scanf.sscanf s "%Lx" Fun.id + + let to_string = Printf.sprintf "%016Lx" + + let compare = Int64.compare +end + module SpanContext = struct - type t = {trace_id: string; span_id: string} [@@deriving rpcty] + type t = {trace_id: 
Trace_id.t; span_id: Span_id.t} [@@deriving rpcty] let context trace_id span_id = {trace_id; span_id} - let to_traceparent t = Printf.sprintf "00-%s-%s-01" t.trace_id t.span_id + let to_traceparent t = + Printf.sprintf "00-%s-%s-01" + (Trace_id.to_string t.trace_id) + (Span_id.to_string t.span_id) let of_traceparent traceparent = let elements = String.split_on_char '-' traceparent in match elements with | ["00"; trace_id; span_id; _] -> - Some {trace_id; span_id} + Some + { + trace_id= Trace_id.of_string trace_id + ; span_id= Span_id.of_string span_id + } | _ -> None @@ -197,17 +254,15 @@ module Span = struct let get_context t = t.context - let generate_id n = String.init n (fun _ -> "0123456789abcdef".[Random.int 16]) - let start ?(attributes = Attributes.empty) ~name ~parent ~span_kind () = let trace_id = match parent with | None -> - generate_id 32 + Trace_id.make () | Some span_parent -> span_parent.context.trace_id in - let span_id = generate_id 16 in + let span_id = Span_id.make () in let context : SpanContext.t = {trace_id; span_id} in (* Using gettimeofday over Mtime as it is better for sharing timestamps between the systems *) let begin_time = Unix.gettimeofday () in @@ -323,7 +378,7 @@ module Spans = struct let set_max_traces x = Atomic.set max_traces x - let finished_spans = Hashtbl.create 100 + let finished_spans = ref ([], 0) let span_hashtbl_is_empty () = Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> @@ -332,7 +387,7 @@ module Spans = struct let finished_span_hashtbl_is_empty () = Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.length finished_spans = 0 + snd !finished_spans = 0 ) let add_to_spans ~(span : Span.t) = @@ -374,21 +429,13 @@ module Spans = struct ) let add_to_finished span = - let key = span.Span.context.trace_id in Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt finished_spans key with - | None -> - if Hashtbl.length finished_spans < Atomic.get max_traces then - 
Hashtbl.add finished_spans key [span] - else if not_throttled () then - debug "%s exceeded max traces when adding to finished span table" - __FUNCTION__ - | Some span_list -> - if List.length span_list < Atomic.get max_spans then - Hashtbl.replace finished_spans key (span :: span_list) - else if not_throttled () then - debug "%s exceeded max traces when adding to finished span table" - __FUNCTION__ + let spans, n = !finished_spans in + if n < Atomic.get max_spans then + finished_spans := (span :: spans, n + 1) + else if not_throttled () then + debug "%s exceeded max traces when adding to finished span table" + __FUNCTION__ ) let mark_finished span = Option.iter add_to_finished (remove_from_spans span) @@ -396,15 +443,15 @@ module Spans = struct (** since copies the existing finished spans and then clears the existing spans as to only export them once *) let since () = Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - let copy = Hashtbl.copy finished_spans in - Hashtbl.clear finished_spans ; + let copy = !finished_spans in + finished_spans := ([], 0) ; reset_throttled () ; copy ) let dump () = Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.(copy spans, Hashtbl.copy finished_spans) + Hashtbl.(copy spans, !finished_spans) ) module GC = struct @@ -429,7 +476,7 @@ module Spans = struct if not_throttled () then debug "Tracing: Span %s timed out, forcibly finishing now" - span.Span.context.span_id ; + (Span_id.to_string span.Span.context.span_id) ; let span = Span.finish ~span ~attributes: @@ -532,7 +579,7 @@ module TracerProvider = struct Atomic.set current no_op ; Xapi_stdext_threads.Threadext.Mutex.execute Spans.lock (fun () -> Hashtbl.clear Spans.spans ; - Hashtbl.clear Spans.finished_spans + Spans.finished_spans := ([], 0) ) | Some enabled -> Atomic.set current enabled diff --git a/ocaml/libs/tracing/tracing.mli b/ocaml/libs/tracing/tracing.mli index e98f35a6a21..930e85c4629 100644 --- a/ocaml/libs/tracing/tracing.mli +++ 
b/ocaml/libs/tracing/tracing.mli @@ -54,18 +54,40 @@ module SpanEvent : sig type t = {name: string; time: float; attributes: string Attributes.t} end -module SpanContext : sig +module Span_id : sig + type t + + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end + +module Trace_id : sig type t - val context : string -> string -> t + val make : unit -> t + + val compare : t -> t -> int + + val of_string : string -> t + + val to_string : t -> string +end + +module SpanContext : sig + type t val to_traceparent : t -> string val of_traceparent : string -> t option - val trace_id_of_span_context : t -> string + val trace_id_of_span_context : t -> Trace_id.t - val span_id_of_span_context : t -> string + val span_id_of_span_context : t -> Span_id.t end module Span : sig @@ -105,10 +127,9 @@ module Spans : sig val span_count : unit -> int - val since : unit -> (string, Span.t list) Hashtbl.t + val since : unit -> Span.t list * int - val dump : - unit -> (string, Span.t list) Hashtbl.t * (string, Span.t list) Hashtbl.t + val dump : unit -> (Trace_id.t, Span.t list) Hashtbl.t * (Span.t list * int) end module Tracer : sig diff --git a/ocaml/libs/tracing/tracing_export.ml b/ocaml/libs/tracing/tracing_export.ml index 5bb154d20c2..43761cdde1c 100644 --- a/ocaml/libs/tracing/tracing_export.ml +++ b/ocaml/libs/tracing/tracing_export.ml @@ -83,13 +83,24 @@ module Content = struct ) in { - id= s |> Span.get_context |> SpanContext.span_id_of_span_context - ; traceId= s |> Span.get_context |> SpanContext.trace_id_of_span_context + id= + s + |> Span.get_context + |> SpanContext.span_id_of_span_context + |> Span_id.to_string + ; traceId= + s + |> Span.get_context + |> SpanContext.trace_id_of_span_context + |> Trace_id.to_string ; parentId= s |> Span.get_parent |> Option.map (fun x -> - x |> Span.get_context |> SpanContext.span_id_of_span_context + x + |> Span.get_context + |> SpanContext.span_id_of_span_context + |> 
Span_id.to_string ) ; name= s |> Span.get_name ; timestamp= int_of_float (Span.get_begin_time s *. 1000000.) @@ -248,9 +259,7 @@ module Destination = struct | Bugtool -> (file_export, "Tracing.File.export") in - let all_spans = - Hashtbl.fold (fun _ spans acc -> spans @ acc) traces [] - in + let all_spans, count = traces in let attributes = [ ("export.span.count", all_spans |> List.length |> string_of_int) @@ -258,9 +267,7 @@ module Destination = struct ; ( "xs.tracing.spans_table.count" , Spans.span_count () |> string_of_int ) - ; ( "xs.tracing.finished_spans_table.count" - , traces |> Hashtbl.length |> string_of_int - ) + ; ("xs.tracing.finished_spans_table.count", string_of_int count) ] in let@ _ = with_tracing ~parent ~attributes ~name in @@ -273,17 +280,15 @@ module Destination = struct debug "Tracing: unable to export span : %s" (Printexc.to_string exn) let flush_spans () = - let span_list = Spans.since () in - let attributes = - [("export.traces.count", Hashtbl.length span_list |> string_of_int)] - in + let ((_span_list, span_count) as span_info) = Spans.since () in + let attributes = [("export.traces.count", string_of_int span_count)] in let@ parent = with_tracing ~parent:None ~attributes ~name:"Tracing.flush_spans" in TracerProvider.get_tracer_providers () |> List.filter TracerProvider.get_enabled |> List.concat_map TracerProvider.get_endpoints - |> List.iter (export_to_endpoint parent span_list) + |> List.iter (export_to_endpoint parent span_info) let delay = Delay.make () diff --git a/ocaml/tests/test_observer.ml b/ocaml/tests/test_observer.ml index 322c586cb20..9286df7af86 100644 --- a/ocaml/tests/test_observer.ml +++ b/ocaml/tests/test_observer.ml @@ -386,11 +386,7 @@ let test_all_spans_finish () = let remaining_spans, finished_spans = Spans.dump () in let result = Hashtbl.fold - (fun k v acc -> - Option.fold ~none:0 ~some:List.length (Hashtbl.find_opt finished_spans k) - = List.length v - && acc - ) + (fun _k v acc -> snd finished_spans = List.length 
v && acc) active_spans true in Alcotest.(check bool) @@ -440,8 +436,8 @@ let test_hashtbl_leaks () = let _, finished_spans = Spans.dump () in let filtered_spans_count = finished_spans - |> Hashtbl.to_seq_values - |> Seq.concat_map List.to_seq + |> fst + |> List.to_seq |> Seq.filter filter_export_spans |> Seq.length in @@ -587,6 +583,46 @@ let test_observed_components_of () = List.iter test_exp_comp expected_components_given_config_value ; observer_experimental_components := original_value +module type Id = sig + type t + + val compare : t -> t -> int + + val to_string : t -> string +end + +let testable_of_id (type a) (module I : Id with type t = a) = + let equal a b = I.compare a b = 0 and pp = Fmt.of_to_string I.to_string in + Alcotest.V1.testable pp equal + +let trace_id = testable_of_id (module Trace_id) + +let span_id = testable_of_id (module Span_id) + +let test_traceid () = + let expected = Trace_id.make () in + let str = expected |> Trace_id.to_string in + let actual = str |> Trace_id.of_string in + Alcotest.V1.check' trace_id ~expected ~actual ~msg:"roundtrip" ; + Alcotest.V1.(check' int ~expected:32 ~actual:(String.length str) ~msg:"length") + +let test_traceid' () = + let expected = "00000000000000010000000000000001" in + let actual = expected |> Trace_id.of_string |> Trace_id.to_string in + Alcotest.V1.(check' string ~expected ~actual ~msg:"roundtrip(str)") + +let test_spanid () = + let expected = Span_id.make () in + let str = expected |> Span_id.to_string in + let actual = str |> Span_id.of_string in + Alcotest.V1.check' span_id ~expected ~actual ~msg:"roundtrip" ; + Alcotest.V1.(check' int ~expected:16 ~actual:(String.length str) ~msg:"length") + +let test_spanid' () = + let expected = "0000000000000001" in + let actual = expected |> Span_id.of_string |> Span_id.to_string in + Alcotest.V1.(check' string ~expected ~actual ~msg:"roundtrip(str)") + let test = [ ( "test_observer_create_and_destroy" @@ -601,6 +637,10 @@ let test = ; 
("test_tracing_exn_backtraces", `Quick, test_tracing_exn_backtraces) ; ("test_attribute_validation", `Quick, test_attribute_validation) ; ("test_observed_components_of", `Quick, test_observed_components_of) + ; ("test span_id", `Quick, test_spanid) + ; ("test trace_id", `Quick, test_traceid) + ; ("test span_id", `Quick, test_spanid') + ; ("test trace_id", `Quick, test_traceid') ] let () = From df7706806b838501f8a002f4f3bbb733418f6521 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Tue, 27 Aug 2024 20:55:36 +0100 Subject: [PATCH 327/341] CP-50614: Tracing: merge attributes more efficiently MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of constructing intermediate seq elements, directly fold the new attribute list into the old set. Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index b4c8d9ad136..f4c5ea8284c 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -135,6 +135,10 @@ end module Attributes = struct include Map.Make (String) + let merge_element map (key, value) = add key value map + + let merge_into into list = List.fold_left merge_element into list + let of_list list = List.to_seq list |> of_seq let to_assoc_list attr = to_seq attr |> List.of_seq @@ -646,10 +650,7 @@ module Tracer = struct if not t.enabled then ok_none else - let attributes = Attributes.of_list attributes in - let attributes = - Attributes.union (fun _k a _b -> Some a) attributes t.attributes - in + let attributes = Attributes.merge_into t.attributes attributes in let span = Span.start ~attributes ~name ~parent ~span_kind () in Spans.add_to_spans ~span ; Ok (Some span) From 82644e8d203c26ac73adccb00e68b3fd19e80ed4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 2 Sep 2024 10:49:34 +0100 Subject: [PATCH 
328/341] CP-50614: Tracing: replace locks with atomic operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tracing shouldn't use locks, because we use it to find hot spots in the application. And introducing a single global lock might introduce lock contention that wouldn't otherwise be present. Use atomic operations on immutable data structures instead. Although a Map is O(log n), and not O(1) like a hashtable, it doesn't require holding any locks to traverse it, or update it. We only need to do an atomic compare-and-set operation once we've finished updating it, and if we raced with anyone (unlikely on OCaml4, unless you got interrupted by the tick thread), then try again with a backoff. We shouldn't hand roll atomic data structures like this, but instead use Saturn (Skip lists), or Kcas (which has a generic [update] function that implements the above method and also works on OCaml 5). before: 3827.092437 ns/run (confidence: 4275.705106 to 3550.511099); after: 2727.247326 ns/run (confidence: 3019.854167 to 2582.316754); Note: when benchmarking ensure that /sys/devices/system/clocksource/clocksource0/current_clocksource is set to TSC. If set to Xen, then reading timestamps is very slow. 
Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 211 ++++++++++++++++----------------- ocaml/libs/tracing/tracing.mli | 4 +- ocaml/tests/test_observer.ml | 4 +- 3 files changed, 109 insertions(+), 110 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index f4c5ea8284c..44cc72bf651 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -364,16 +364,24 @@ module Span = struct span end -module Spans = struct - let lock = Mutex.create () - - let spans = Hashtbl.create 100 +module SpanMap = Map.Make (Trace_id) - let span_count () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.length spans +module Spans = struct + let spans = Atomic.make SpanMap.empty + + let rec update_spans f arg = + let old = Atomic.get spans in + let next = f old arg in + if Atomic.compare_and_set spans old next then + () + else ( + (* TODO: should use Kcas.update, or Saturn skip_lists for domains *) + Thread.yield () ; + (update_spans [@tailcall]) f arg ) + let span_count () = SpanMap.cardinal (Atomic.get spans) + let max_spans = Atomic.make 2500 let set_max_spans x = Atomic.set max_spans x @@ -382,123 +390,114 @@ module Spans = struct let set_max_traces x = Atomic.set max_traces x - let finished_spans = ref ([], 0) + let finished_spans = Atomic.make ([], 0) - let span_hashtbl_is_empty () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.length spans = 0 - ) + let span_hashtbl_is_empty () = SpanMap.is_empty (Atomic.get spans) - let finished_span_hashtbl_is_empty () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - snd !finished_spans = 0 - ) + let finished_span_hashtbl_is_empty () = Atomic.get finished_spans |> snd = 0 - let add_to_spans ~(span : Span.t) = + let add_to_spans_unlocked spans (span : Span.t) = let key = span.context.trace_id in - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt spans key with - | None -> - 
if Hashtbl.length spans < Atomic.get max_traces then - Hashtbl.add spans key [span] - else if not_throttled () then - debug "%s exceeded max traces when adding to span table" - __FUNCTION__ - | Some span_list -> - if List.length span_list < Atomic.get max_spans then - Hashtbl.replace spans key (span :: span_list) - else if not_throttled () then - debug "%s exceeded max traces when adding to span table" - __FUNCTION__ - ) + match SpanMap.find_opt key spans with + | None -> + if SpanMap.cardinal spans < Atomic.get max_traces then + SpanMap.add key [span] spans + else ( + if not_throttled () then + debug "%s exceeded max traces when adding to span table" + __FUNCTION__ ; + spans + ) + | Some span_list -> + if List.length span_list < Atomic.get max_spans then + SpanMap.add key (span :: span_list) spans + else ( + if not_throttled () then + debug "%s exceeded max traces when adding to span table" + __FUNCTION__ ; + spans + ) - let remove_from_spans span = + let add_to_spans ~span = update_spans add_to_spans_unlocked span + + let remove_from_spans_unlocked spans span = let key = span.Span.context.trace_id in - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - match Hashtbl.find_opt spans key with - | None -> - if not_throttled () then - debug "%s span does not exist or already finished" __FUNCTION__ ; - None - | Some span_list -> - ( match - List.filter (fun x -> x.Span.context <> span.context) span_list - with - | [] -> - Hashtbl.remove spans key - | filtered_list -> - Hashtbl.replace spans key filtered_list - ) ; - Some span + match SpanMap.find_opt key spans with + | None -> + if not_throttled () then + debug "%s span does not exist or already finished" __FUNCTION__ ; + spans + | Some span_list -> ( + match List.filter (fun x -> x.Span.context <> span.context) span_list with + | [] -> + SpanMap.remove key spans + | filtered_list -> + SpanMap.add key filtered_list spans ) - let add_to_finished span = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun 
() -> - let spans, n = !finished_spans in - if n < Atomic.get max_spans then - finished_spans := (span :: spans, n + 1) - else if not_throttled () then - debug "%s exceeded max traces when adding to finished span table" - __FUNCTION__ - ) + let remove_from_spans span = + update_spans remove_from_spans_unlocked span ; + Some span + + let rec add_to_finished span = + let ((spans, n) as old) = Atomic.get finished_spans in + if n < Atomic.get max_spans then + let next = (span :: spans, n + 1) in + if Atomic.compare_and_set finished_spans old next then + () + else + (add_to_finished [@tailcall]) span + else if not_throttled () then + debug "%s exceeded max traces when adding to finished span table" + __FUNCTION__ let mark_finished span = Option.iter add_to_finished (remove_from_spans span) + let empty_finished = ([], 0) + (** since copies the existing finished spans and then clears the existing spans as to only export them once *) let since () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - let copy = !finished_spans in - finished_spans := ([], 0) ; - reset_throttled () ; - copy - ) + let copy = Atomic.exchange finished_spans empty_finished in + reset_throttled () ; copy - let dump () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.(copy spans, !finished_spans) - ) + let dump () = (Atomic.get spans, Atomic.get finished_spans) module GC = struct - let lock = Mutex.create () - let span_timeout = Atomic.make 86400. (* one day in seconds *) let span_timeout_thread = ref None - let gc_inactive_spans () = - Xapi_stdext_threads.Threadext.Mutex.execute lock (fun () -> - Hashtbl.filter_map_inplace - (fun _ spanlist -> - let filtered = - List.filter_map - (fun span -> - let elapsed = - Unix.gettimeofday () -. span.Span.begin_time - in - if elapsed > Atomic.get span_timeout *. 1000000. 
then ( - if not_throttled () then - debug - "Tracing: Span %s timed out, forcibly finishing now" - (Span_id.to_string span.Span.context.span_id) ; - let span = - Span.finish ~span - ~attributes: - (Attributes.singleton "gc_inactive_span_timeout" - (string_of_float elapsed) - ) - () - in - add_to_finished span ; None - ) else - Some span - ) - spanlist - in - match filtered with [] -> None | spans -> Some spans - ) - spans - ) + let gc_inactive_spans_unlocked spans () = + SpanMap.filter_map + (fun _ spanlist -> + let filtered = + List.filter_map + (fun span -> + let elapsed = Unix.gettimeofday () -. span.Span.begin_time in + if elapsed > Atomic.get span_timeout *. 1000000. then ( + if not_throttled () then + debug "Tracing: Span %s timed out, forcibly finishing now" + (Span_id.to_string span.Span.context.span_id) ; + let span = + Span.finish ~span + ~attributes: + (Attributes.singleton "gc_inactive_span_timeout" + (string_of_float elapsed) + ) + () + in + add_to_finished span ; None + ) else + Some span + ) + spanlist + in + match filtered with [] -> None | spans -> Some spans + ) + spans + + let gc_inactive_spans () = update_spans gc_inactive_spans_unlocked () let initialise_thread ~timeout = Atomic.set span_timeout timeout ; @@ -581,10 +580,8 @@ module TracerProvider = struct match List.find_opt (fun provider -> provider.enabled) providers with | None -> Atomic.set current no_op ; - Xapi_stdext_threads.Threadext.Mutex.execute Spans.lock (fun () -> - Hashtbl.clear Spans.spans ; - Spans.finished_spans := ([], 0) - ) + Atomic.set Spans.spans SpanMap.empty ; + Atomic.set Spans.finished_spans Spans.empty_finished | Some enabled -> Atomic.set current enabled diff --git a/ocaml/libs/tracing/tracing.mli b/ocaml/libs/tracing/tracing.mli index 930e85c4629..715293bc3fd 100644 --- a/ocaml/libs/tracing/tracing.mli +++ b/ocaml/libs/tracing/tracing.mli @@ -120,6 +120,8 @@ module Span : sig val get_attributes : t -> (string * string) list end +module SpanMap : module type of 
Map.Make (Trace_id) + module Spans : sig val set_max_spans : int -> unit @@ -129,7 +131,7 @@ module Spans : sig val since : unit -> Span.t list * int - val dump : unit -> (Trace_id.t, Span.t list) Hashtbl.t * (Span.t list * int) + val dump : unit -> Span.t list SpanMap.t * (Span.t list * int) end module Tracer : sig diff --git a/ocaml/tests/test_observer.ml b/ocaml/tests/test_observer.ml index 9286df7af86..9adbdc4bc5f 100644 --- a/ocaml/tests/test_observer.ml +++ b/ocaml/tests/test_observer.ml @@ -385,7 +385,7 @@ let test_all_spans_finish () = let _ = List.map (fun span -> Tracer.finish span) trace_spans in let remaining_spans, finished_spans = Spans.dump () in let result = - Hashtbl.fold + SpanMap.fold (fun _k v acc -> snd finished_spans = List.length v && acc) active_spans true in @@ -393,7 +393,7 @@ let test_all_spans_finish () = "All spans that are finished are moved to finished_spans" true result ; Alcotest.(check int) "traces with no spans are removed from the hashtable" 0 - (Hashtbl.length remaining_spans) ; + (SpanMap.cardinal remaining_spans) ; test_destroy ~__context ~self () let test_hashtbl_leaks () = From 1ae22858f85a94230d8492e5c61ad756bc64f3b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 2 Sep 2024 11:25:40 +0100 Subject: [PATCH 329/341] CP-50614: Tracing: use a map instead of Span list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A small improvement, but could be more on deep span trees. 
before:2727.247326 ns/run (confidence: 3019.854167 to 2582.316754) after: 2590.000000 ns/run(confidence: 3362.115942 to 2068.393072); Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 50 ++++++++++++++++++---------------- ocaml/libs/tracing/tracing.mli | 6 ++-- ocaml/tests/test_observer.ml | 6 ++-- 3 files changed, 34 insertions(+), 28 deletions(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index 44cc72bf651..4aefefcd737 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -364,10 +364,11 @@ module Span = struct span end -module SpanMap = Map.Make (Trace_id) +module TraceMap = Map.Make (Trace_id) +module SpanMap = Map.Make (Span_id) module Spans = struct - let spans = Atomic.make SpanMap.empty + let spans = Atomic.make TraceMap.empty let rec update_spans f arg = let old = Atomic.get spans in @@ -380,7 +381,7 @@ module Spans = struct (update_spans [@tailcall]) f arg ) - let span_count () = SpanMap.cardinal (Atomic.get spans) + let span_count () = TraceMap.cardinal (Atomic.get spans) let max_spans = Atomic.make 2500 @@ -392,16 +393,16 @@ module Spans = struct let finished_spans = Atomic.make ([], 0) - let span_hashtbl_is_empty () = SpanMap.is_empty (Atomic.get spans) + let span_hashtbl_is_empty () = TraceMap.is_empty (Atomic.get spans) let finished_span_hashtbl_is_empty () = Atomic.get finished_spans |> snd = 0 let add_to_spans_unlocked spans (span : Span.t) = let key = span.context.trace_id in - match SpanMap.find_opt key spans with + match TraceMap.find_opt key spans with | None -> - if SpanMap.cardinal spans < Atomic.get max_traces then - SpanMap.add key [span] spans + if TraceMap.cardinal spans < Atomic.get max_traces then + TraceMap.add key (SpanMap.singleton span.context.span_id span) spans else ( if not_throttled () then debug "%s exceeded max traces when adding to span table" @@ -409,8 +410,10 @@ module Spans = struct spans ) | Some span_list -> - if List.length span_list < Atomic.get 
max_spans then - SpanMap.add key (span :: span_list) spans + if SpanMap.cardinal span_list < Atomic.get max_spans then + TraceMap.add key + (SpanMap.add span.context.span_id span span_list) + spans else ( if not_throttled () then debug "%s exceeded max traces when adding to span table" @@ -422,18 +425,17 @@ module Spans = struct let remove_from_spans_unlocked spans span = let key = span.Span.context.trace_id in - match SpanMap.find_opt key spans with + match TraceMap.find_opt key spans with | None -> if not_throttled () then debug "%s span does not exist or already finished" __FUNCTION__ ; spans - | Some span_list -> ( - match List.filter (fun x -> x.Span.context <> span.context) span_list with - | [] -> - SpanMap.remove key spans - | filtered_list -> - SpanMap.add key filtered_list spans - ) + | Some span_list -> + let span_list = SpanMap.remove span.Span.context.span_id span_list in + if SpanMap.is_empty span_list then + TraceMap.remove key spans + else + TraceMap.add key span_list spans let remove_from_spans span = update_spans remove_from_spans_unlocked span ; @@ -445,8 +447,10 @@ module Spans = struct let next = (span :: spans, n + 1) in if Atomic.compare_and_set finished_spans old next then () - else + else ( + Thread.yield () ; (add_to_finished [@tailcall]) span + ) else if not_throttled () then debug "%s exceeded max traces when adding to finished span table" __FUNCTION__ @@ -469,11 +473,11 @@ module Spans = struct let span_timeout_thread = ref None let gc_inactive_spans_unlocked spans () = - SpanMap.filter_map + TraceMap.filter_map (fun _ spanlist -> let filtered = - List.filter_map - (fun span -> + SpanMap.filter_map + (fun _ span -> let elapsed = Unix.gettimeofday () -. span.Span.begin_time in if elapsed > Atomic.get span_timeout *. 1000000. 
then ( if not_throttled () then @@ -493,7 +497,7 @@ module Spans = struct ) spanlist in - match filtered with [] -> None | spans -> Some spans + if SpanMap.is_empty filtered then None else Some filtered ) spans @@ -580,7 +584,7 @@ module TracerProvider = struct match List.find_opt (fun provider -> provider.enabled) providers with | None -> Atomic.set current no_op ; - Atomic.set Spans.spans SpanMap.empty ; + Atomic.set Spans.spans TraceMap.empty ; Atomic.set Spans.finished_spans Spans.empty_finished | Some enabled -> Atomic.set current enabled diff --git a/ocaml/libs/tracing/tracing.mli b/ocaml/libs/tracing/tracing.mli index 715293bc3fd..e78153c9790 100644 --- a/ocaml/libs/tracing/tracing.mli +++ b/ocaml/libs/tracing/tracing.mli @@ -120,7 +120,9 @@ module Span : sig val get_attributes : t -> (string * string) list end -module SpanMap : module type of Map.Make (Trace_id) +module TraceMap : module type of Map.Make (Trace_id) + +module SpanMap : module type of Map.Make (Span_id) module Spans : sig val set_max_spans : int -> unit @@ -131,7 +133,7 @@ module Spans : sig val since : unit -> Span.t list * int - val dump : unit -> Span.t list SpanMap.t * (Span.t list * int) + val dump : unit -> Span.t SpanMap.t TraceMap.t * (Span.t list * int) end module Tracer : sig diff --git a/ocaml/tests/test_observer.ml b/ocaml/tests/test_observer.ml index 9adbdc4bc5f..7ea23a05939 100644 --- a/ocaml/tests/test_observer.ml +++ b/ocaml/tests/test_observer.ml @@ -385,15 +385,15 @@ let test_all_spans_finish () = let _ = List.map (fun span -> Tracer.finish span) trace_spans in let remaining_spans, finished_spans = Spans.dump () in let result = - SpanMap.fold - (fun _k v acc -> snd finished_spans = List.length v && acc) + TraceMap.fold + (fun _k v acc -> snd finished_spans = SpanMap.cardinal v && acc) active_spans true in Alcotest.(check bool) "All spans that are finished are moved to finished_spans" true result ; Alcotest.(check int) "traces with no spans are removed from the hashtable" 0 - 
(SpanMap.cardinal remaining_spans) ; + (TraceMap.cardinal remaining_spans) ; test_destroy ~__context ~self () let test_hashtbl_leaks () = From 3347189f67446ea158a8f7b9e1f2b162676bb1b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 2 Sep 2024 13:03:51 +0100 Subject: [PATCH 330/341] CP-50614: Tracing: be more acceptive of span/trace ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And print the failed ID. Signed-off-by: Edwin Török --- ocaml/libs/tracing/tracing.ml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ocaml/libs/tracing/tracing.ml b/ocaml/libs/tracing/tracing.ml index 4aefefcd737..ab097253dcb 100644 --- a/ocaml/libs/tracing/tracing.ml +++ b/ocaml/libs/tracing/tracing.ml @@ -197,7 +197,12 @@ end = struct let make = Random.bits64 - let of_string s = Scanf.sscanf s "%Lx" Fun.id + let of_string s = + try Scanf.sscanf s "%Lx" Fun.id + with e -> + D.debug "Failed to parse span id %s: %s" s (Printexc.to_string e) ; + (* don't cause XAPI to fail *) + 0L let to_string = Printf.sprintf "%016Lx" From 8b7029ac15552bf44bf064f681bf6d1ee3ca880e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 2 Sep 2024 17:17:37 +0100 Subject: [PATCH 331/341] CP-51483: epoll tests: support QCHECK_LONG_FACTOR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following environment variables can now be used by tests: * QCHECK_LONG_FACTOR (default 10): multiplies the default iteration count of 100 * QCHECK_SEED: an integer seed (the tests print their current seed on startup otherwise, can be used to reproduce a failure) These environment variables are already supported by the QCheck framework, we just need to use them. 
Signed-off-by: Edwin Török --- .../xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml | 2 +- ocaml/quicktest/quicktest | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml index d4e2d836675..be848f7b8a4 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/unixext_test.ml @@ -253,7 +253,7 @@ let check_subsets msg ((s1, s2, s3) as all) ((s1', s2', s3') as all') = let test_select = let gen, print = Generate.select_input in - Test.make ~long_factor:10 ~name:__FUNCTION__ ~print gen @@ fun t -> + Test.make ~name:__FUNCTION__ ~print gen @@ fun t -> (* epoll raised EEXIST, but none of the actual callers in XAPI need this, so skip *) diff --git a/ocaml/quicktest/quicktest b/ocaml/quicktest/quicktest index 89fa7927fef..0388c907ef0 100644 --- a/ocaml/quicktest/quicktest +++ b/ocaml/quicktest/quicktest @@ -1,5 +1,13 @@ #!/bin/bash ulimit -n 2048 + +# By default make the tests run 10x as many iterations as the default they +# would've run in the CI +# XenRT can further override this env var if desired +# For this to have an effect tests must NOT specify a long_factor of their own. +QCHECK_LONG_FACTOR=${QCHECK_LONG_FACTOR:=10} +export QCHECK_LONG_FACTOR +echo "QCHECK_LONG_FACTOR: ${QCHECK_LONG_FACTOR}" # Run quicktest with support for exception backtraces. 
OCAMLRUNPARAM=b "@OPTDIR@/debug/quicktestbin" "$@" From c681c2696d5930f6219a7ed3b0f014315d1ad871 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 16 Sep 2024 11:28:32 +0100 Subject: [PATCH 332/341] [ci]: enable merge queues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Required for merge queues, so we have some tests that run in the merge queue: https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/configuring-pull-request-merges/managing-a-merge-queue#triggering-merge-group-checks-with-github-actions Signed-off-by: Edwin Török --- .github/workflows/format.yml | 1 + .github/workflows/main.yml | 1 + .github/workflows/other.yml | 1 + .github/workflows/shellcheck.yaml | 1 + 4 files changed, 4 insertions(+) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 4d0350056f3..3c2d7148f90 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -6,6 +6,7 @@ on: - master - 'feature/**' - '*-lcm' + merge_group: jobs: ocaml-format: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 79ce257d7f2..f3f7e00a69f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -6,6 +6,7 @@ on: schedule: # run daily, this refreshes the cache - cron: "13 2 * * *" + merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index c4042638922..52c73729594 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -6,6 +6,7 @@ on: schedule: # run daily, this refreshes the cache - cron: "13 2 * * *" + merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} diff --git 
a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index c17568d821c..8be332ada23 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -2,6 +2,7 @@ name: ShellCheck on: pull_request: + merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: group: sc-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} From bcf4cb7c1b2e910fe6cb68ed3bc02216a6a7505b Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Mon, 16 Sep 2024 13:40:11 +0100 Subject: [PATCH 333/341] Third-Party DCO Remediation Commit for Gang Ji On behalf of Gang Ji , I, Pau Ruiz Safont , hereby add my Signed-off-by to this commit: e40fe60d32d5c893304b23fd73b296ad2ec5576d Signed-off-by: Pau Ruiz Safont From 233b96ba44b002a27205b0681f35527457b78c2c Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 11 Jul 2024 12:09:45 +0100 Subject: [PATCH 334/341] Clock: Change implementation of Date to use Ptime.t This allows to maintain sub-second precision, and only convert to datetimes when converting to strings. Now timezone conversion is more explicit. Datetimes without timezone are assumed to be UTC. While rare in practice, this is not safe in general, so print a warning to stdout whenever this happens. Signed-off-by: Pau Ruiz Safont --- ocaml/libs/clock/date.ml | 75 ++++++++----------- ocaml/libs/clock/date.mli | 46 +++++++----- ocaml/libs/clock/dune | 1 + ocaml/libs/clock/test_date.ml | 19 ++++- .../xapi-stdext/lib/xapi-stdext-date/date.ml | 2 + .../xapi-stdext/lib/xapi-stdext-date/date.mli | 8 +- quality-gate.sh | 2 +- 7 files changed, 87 insertions(+), 66 deletions(-) diff --git a/ocaml/libs/clock/date.ml b/ocaml/libs/clock/date.ml index a27c6b505ca..24c7df1a21f 100644 --- a/ocaml/libs/clock/date.ml +++ b/ocaml/libs/clock/date.ml @@ -10,6 +10,8 @@ GNU Lesser General Public License for more details. 
*) +module L = Debug.Make (struct let name = __MODULE__ end) + let months = [| "Jan" @@ -35,16 +37,10 @@ let days = [|"Sun"; "Mon"; "Tue"; "Wed"; "Thu"; "Fri"; "Sat"|] avoided yet again. *) type tz = int option -type t = Ptime.date * Ptime.time * tz +type t = {t: Ptime.t; tz: tz} let utc = Some 0 -let of_dt tz dt = - let date, time = dt in - (date, time, tz) - -let to_dt (date, time, _) = (date, time) - let best_effort_iso8601_to_rfc3339 x = let x = try @@ -72,10 +68,8 @@ let of_iso8601 x = match Ptime.of_rfc3339 rfc3339 |> Ptime.rfc3339_error_to_msg with | Error _ -> invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x) - | Ok (t, None, _) -> - Ptime.to_date_time t |> of_dt None - | Ok (t, Some tz, _) -> - Ptime.to_date_time ~tz_offset_s:tz t |> of_dt (Some tz) + | Ok (t, tz, _) -> + {t; tz} let print_tz tz_s = match tz_s with @@ -90,10 +84,11 @@ let print_tz tz_s = let tz_min = all_tz_minutes mod 60 in Printf.sprintf "%c%02d:%02d" tz_sign tz_h tz_min -let to_rfc3339 ((y, mon, d), ((h, min, s), _), tz) = +let to_rfc3339 {t; tz} = (* Must be compatible with iso8601 as well. 
Because some client limitations, the hyphens between year, month and day have to be absent *) + let (y, mon, d), ((h, min, s), _) = Ptime.to_date_time ?tz_offset_s:tz t in let tz = print_tz tz in Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i%s" y mon d h min s tz @@ -104,27 +99,25 @@ let weekday ~year ~mon ~day = let m = mon + (12 * a) - 2 in (day + y + (y / 4) - (y / 100) + (y / 400) + (31 * m / 12)) mod 7 -let to_rfc822 ((year, mon, day), ((h, min, s), _), tz) = +let to_rfc822 {t; tz} = + let (year, mon, day), ((h, min, s), _) = + Ptime.to_date_time ?tz_offset_s:tz t + in let timezone = match print_tz tz with "Z" -> "GMT" | tz -> tz in let weekday = weekday ~year ~mon ~day in Printf.sprintf "%s, %d %s %d %02d:%02d:%02d %s" days.(weekday) day months.(mon - 1) year h min s timezone -let to_ptime_t t = - match to_dt t |> Ptime.of_date_time with - | Some t -> +let to_ptime = function + | {t; tz= None} as d -> + L.warn "%s: Date %s converted to POSIX time, but timezone is missing" + __FUNCTION__ (to_rfc3339 d) ; + t + | {t; tz= Some _} -> t - | None -> - let _, (_, offset), _ = t in - invalid_arg - (Printf.sprintf "%s: dt='%s', offset='%i' is invalid" __FUNCTION__ - (to_rfc3339 t) offset - ) - -let to_ptime = to_ptime_t -let of_ptime t = Ptime.to_date_time t |> of_dt utc +let of_ptime t = {t; tz= utc} let of_unix_time s = match Ptime.of_float_s s with @@ -133,24 +126,22 @@ let of_unix_time s = | Some t -> of_ptime t -let to_unix_time t = to_ptime_t t |> Ptime.to_float_s +let to_unix_time t = to_ptime t |> Ptime.to_float_s -let _localtime current_tz_offset t = - let tz_offset_s = current_tz_offset |> Option.value ~default:0 in - let localtime = t |> Ptime.to_date_time ~tz_offset_s |> of_dt None in - let _, (_, localtime_offset), _ = localtime in - if localtime_offset <> tz_offset_s then - invalid_arg - (Printf.sprintf "%s: offsets don't match. 
offset='%i', t='%s'" - __FUNCTION__ tz_offset_s (Ptime.to_rfc3339 t) - ) ; - localtime +let strip_tz tz t = + let t = + match tz with + | None -> + t + | Some tz -> + Ptime.Span.of_int_s tz |> Ptime.add_span t |> Option.value ~default:t + in + {t; tz= None} -let _localtime_string current_tz_offset t = - _localtime current_tz_offset t |> to_rfc3339 +let _localtime_string tz t = strip_tz tz t |> to_rfc3339 let localtime () = - _localtime (Ptime_clock.current_tz_offset_s ()) (Ptime_clock.now ()) + strip_tz (Ptime_clock.current_tz_offset_s ()) (Ptime_clock.now ()) let now () = of_ptime (Ptime_clock.now ()) @@ -173,8 +164,8 @@ let compare_tz a b = | Some _, None -> 1 -let compare ((_, _, a_z) as a) ((_, _, b_z) as b) = +let compare a b = let ( ) a b = if a = 0 then b else a in - Ptime.compare (to_ptime a) (to_ptime b) compare_tz a_z b_z + Ptime.compare (to_ptime a) (to_ptime b) compare_tz a.tz b.tz -let eq x y = compare x y = 0 +let equal x y = if x == y then true else compare x y = 0 diff --git a/ocaml/libs/clock/date.mli b/ocaml/libs/clock/date.mli index 2a0123813b3..1ba0f19c9d9 100644 --- a/ocaml/libs/clock/date.mli +++ b/ocaml/libs/clock/date.mli @@ -12,24 +12,32 @@ * GNU Lesser General Public License for more details. *) -(** date-time with support for keeping timezone for ISO 8601 conversion *) +(** Nanosecond-precision POSIX timestamps, allows datetimes with unspecified + timezones. These are needed to produce and accept ISO 8601 datetimes without + timezones, but because the timezone is not known they do not share a common + point of time with any other timestamps they cannot be converted to unix + time, or be compared with other timestamps. All other timestamps have a + timezone attached to them, which will be used to serialize them to a + datetime string. This timezone is determined when creating a value and + cannot be changed. For timestamps created from datetime strings, the + timezone is maintained. For all other values UTC is used. 
*) type t (** Conversions *) val of_ptime : Ptime.t -> t -(** Convert ptime to time in UTC *) +(** Converts ptime to date *) val to_ptime : t -> Ptime.t (** Convert date/time to a ptime value: the number of seconds since 00:00:00 - UTC, 1 Jan 1970. Assumes the underlying {!t} is in UTC *) + UTC, 1 Jan 1970. When {!t} lacks a timezone, UTC is assumed *) val of_unix_time : float -> t (** Convert calendar time [x] (as returned by e.g. Unix.time), to time in UTC *) val to_unix_time : t -> float (** Convert date/time to a unix timestamp: the number of seconds since - 00:00:00 UTC, 1 Jan 1970. Assumes the underlying {!t} is in UTC *) + 00:00:00 UTC, 1 Jan 1970. When {!t} lacks a timezone, UTC is assumed *) val to_rfc822 : t -> string (** Convert date/time to email-formatted (RFC 822) string. *) @@ -39,38 +47,42 @@ val to_rfc3339 : t -> string the ISO 8601 format *) val of_iso8601 : string -> t -(** Convert ISO 8601 formatted string to a date/time value. Does not accept a - timezone annotated datetime - i.e. string must be UTC, and end with a Z *) +(** Convert ISO 8601 formatted string to a date/time value. Timezone can be + missing from the string, but that means some conversions will assume UTC, + which might be incorrect *) val epoch : t -(** 00:00:00 UTC, 1 Jan 1970, in UTC *) +(** 00:00:00 UTC, 1 Jan 1970 *) val now : unit -> t -(** Count the number of seconds passed since 00:00:00 UTC, 1 Jan 1970, in UTC *) +(** Count the number of seconds passed since 00:00:00 UTC, 1 Jan 1970 *) val _localtime_string : Ptime.tz_offset_s option -> Ptime.t -> string (** exposed for testing *) val localtime : unit -> t -(** Count the number of seconds passed since 00:00:00 UTC, 1 Jan 1970, in local - time *) +(** Local date time, the timezone is stripped. Do not use this call in new + code. *) (** Comparisons *) -val eq : t -> t -> bool -(** [eq a b] returns whether [a] and [b] are equal *) +val equal : t -> t -> bool +(** [equal a b] returns whether [a] and [b] are equal. 
Timestamps that are not + on UTC will only be equal to the values in their same memory position. *) val compare : t -> t -> int (** [compare a b] returns -1 if [a] is earlier than [b], 1 if [a] is later than - [b] or the ordering of the timezone printer *) + [b] or which timeone is sooner. When [a] or [b] lack a timezone, UTC is + assumed *) val is_earlier : than:t -> t -> bool (** [is_earlier ~than a] returns whether the timestamp [a] happens before - [than] *) + [than]. When [than] or [b] lack a timezone, UTC is assumed. *) val is_later : than:t -> t -> bool -(** [is_later ~than a] returns whether the timestamp [a] happens after [than] - *) +(** [is_later ~than a] returns whether the timestamp [a] happens after [than]. + When [than] or [b] lack a timezone, UTC is assumed. *) val diff : t -> t -> Ptime.Span.t -(** [diff a b] returns the span of time corresponding to [a - b] *) +(** [diff a b] returns the span of time corresponding to [a - b]. When [than] + or [b] lack a timezone, UTC is assumed. 
*) diff --git a/ocaml/libs/clock/dune b/ocaml/libs/clock/dune index 76285033f35..a2afef36460 100644 --- a/ocaml/libs/clock/dune +++ b/ocaml/libs/clock/dune @@ -8,6 +8,7 @@ mtime.clock.os (re_export ptime) ptime.clock.os + xapi-log ) ) diff --git a/ocaml/libs/clock/test_date.ml b/ocaml/libs/clock/test_date.ml index ccf7517e6fa..722bdbfd2ad 100644 --- a/ocaml/libs/clock/test_date.ml +++ b/ocaml/libs/clock/test_date.ml @@ -17,7 +17,7 @@ let tests = check_float "to_unix_time inverts of_unix_time" time (time |> of_unix_time |> to_unix_time) ; check_true "of_unix_time inverts to_unix_time" - @@ eq (time |> of_unix_time) + @@ equal (time |> of_unix_time) (time |> of_unix_time |> to_unix_time |> of_unix_time) in let test_iso8601 () = @@ -45,7 +45,7 @@ let tests = check_string "to_rfc3339 inverts of_iso8601" no_dash_utc_time_str (no_dash_utc_time_str |> of_iso8601 |> to_rfc3339) ; check_true "of_iso8601 inverts to_rfc3339" - (eq + (equal (no_dash_utc_time_str |> of_iso8601) (no_dash_utc_time_str |> of_iso8601 |> to_rfc3339 |> of_iso8601) ) @@ -93,8 +93,6 @@ let tests = check_string "can process missing tz with dashes, but return without dashes" missing_tz_no_dash (missing_tz_dash |> of_iso8601 |> to_rfc3339) ; - check_float "to_unix_time assumes UTC" 1607620760. 
- (missing_tz_no_dash |> of_iso8601 |> to_unix_time) ; let localtime' = localtime () in check_string "to_rfc3339 inverts of_iso8601 for localtime" (localtime' |> to_rfc3339) @@ -104,6 +102,15 @@ let tests = let formatted = of_unix_time unix_timestamp |> to_rfc822 in check_string "String is properly RFC-822-formatted" expected formatted in + let test_no_timezone_to_unix () = + (* this is allowed, but it will print a warning to stdout *) + let missing_tz_no_dash = "20201210T17:19:20" in + let with_tz_no_dash = "20201210T17:19:20Z" in + let to_unix_time dt = dt |> of_iso8601 |> to_unix_time in + check_float "Datetime without timezone assumes it's in UTC" + (to_unix_time with_tz_no_dash) + (to_unix_time missing_tz_no_dash) + in let test_email_dates () = let dates = [ @@ -131,6 +138,10 @@ let tests = ; ("test_localtime_string", `Quick, test_localtime_string) ; ("test_ca342171", `Quick, test_ca342171) ; ("test_xsi894", `Quick, test_xsi894) + ; ( "Date w/o timezone to POSIX time conversion" + , `Quick + , test_no_timezone_to_unix + ) ; ("RFC 822 formatting", `Quick, test_email_dates) ] diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml index 45e9bba5efb..ef0f98ce13a 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml @@ -28,6 +28,8 @@ let rfc822_of_float = of_unix_time let rfc822_to_string = to_rfc822 +let eq = equal + type iso8601 = t type rfc822 = t diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli index c34bcfd9e1c..7fb29404306 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.mli @@ -57,8 +57,8 @@ val localtime : unit -> t (** Comparisons *) -val eq : t -> t -> bool -(** [eq a b] returns whether [a] and [b] are equal *) +val equal : t -> t -> bool +(** [equal a b] returns whether [a] and [b] 
are equal *) val compare : t -> t -> int (** [compare a b] returns -1 if [a] is earlier than [b], 1 if [a] is later than @@ -77,6 +77,10 @@ val diff : t -> t -> Ptime.Span.t (** Deprecated bindings, these will be removed in a future release: *) +val eq : t -> t -> bool +[@@deprecated "Use Date.equal"] +(** [eq a b] returns whether [a] and [b] are equal *) + val rfc822_to_string : t -> string [@@deprecated "Use Date.to_rfc822"] (** Same as {!to_rfc822} *) diff --git a/quality-gate.sh b/quality-gate.sh index ab741ef3445..8e5a6ce8c26 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -40,7 +40,7 @@ mli-files () { } structural-equality () { - N=10 + N=11 EQ=$(git grep -r --count ' == ' -- '**/*.ml' ':!ocaml/sdk-gen/**/*.ml' | cut -d ':' -f 2 | paste -sd+ - | bc) if [ "$EQ" -eq "$N" ]; then echo "OK counted $EQ usages of ' == '" From 77a706303b5a27ac33e492fe775630e94efb3c29 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 17 Jul 2024 13:39:32 +0100 Subject: [PATCH 335/341] idl: add test to ensure datetime parameters are documented correctly Datetimes received as call parameters can lack timezone. In that case the host assumed the datetime is in UTC. This is not always safe to do, but returning an error will break clients. Instead keep doing the assumption and enforce proper documentation of the parameters in the datamodel. 
Signed-off-by: Pau Ruiz Safont --- ocaml/idl/dune | 7 +++--- ocaml/idl/test_datetimes.ml | 47 ++++++++++++++++++++++++++++++++++++ ocaml/idl/test_datetimes.mli | 0 3 files changed, 51 insertions(+), 3 deletions(-) create mode 100644 ocaml/idl/test_datetimes.ml create mode 100644 ocaml/idl/test_datetimes.mli diff --git a/ocaml/idl/dune b/ocaml/idl/dune index 8d337c5c7f2..d971e6597df 100644 --- a/ocaml/idl/dune +++ b/ocaml/idl/dune @@ -45,11 +45,12 @@ (action (run %{x} -closed -markdown)) ) -(test - (name schematest) +(tests + (names schematest test_datetimes) (modes exe) - (modules schematest) + (modules schematest test_datetimes) (libraries + astring rpclib.core rpclib.json xapi_datamodel diff --git a/ocaml/idl/test_datetimes.ml b/ocaml/idl/test_datetimes.ml new file mode 100644 index 00000000000..ab31f313e5d --- /dev/null +++ b/ocaml/idl/test_datetimes.ml @@ -0,0 +1,47 @@ +module DT = Datamodel_types + +let calls_with_datetime_params = + let get_messages DT.{name; messages; _} = + List.to_seq messages + |> Seq.map (fun msg -> + DT.{msg with msg_name= Printf.sprintf "%s.%s" name msg.msg_name} + ) + in + let with_datetimes DT.{msg_name; msg_params; _} = + let cursed_params = + List.filter_map + (fun param -> + if + param.DT.param_type = DT.DateTime + && not (Astring.String.is_infix ~affix:"UTC" param.param_doc) + then + Some (msg_name, param.param_name, param.param_doc) + else + None + ) + msg_params + in + if cursed_params <> [] then Some (List.to_seq cursed_params) else None + in + + Datamodel.all_system + |> List.to_seq + |> Seq.concat_map get_messages + |> Seq.filter_map with_datetimes + |> Seq.concat + +let () = + if not (Seq.is_empty calls_with_datetime_params) then ( + Printf.printf + "\x1b[31;1mERROR\x1b[0m: Found datetime parameters in calls without \ + proper documentation. It must mention that datetimes are assumed to \ + have UTC when they do not contain a timezone. 
Parameters found:\n" ; + calls_with_datetime_params + |> Seq.iter (fun (call_name, param_name, param_doc) -> + Printf.printf "%s (%s): %s\n" call_name param_name param_doc + ) ; + exit 1 + ) else + Printf.printf + "\x1b[32;1mOK\x1b[0m: All datetime parameters in calls have proper \ + documentation." diff --git a/ocaml/idl/test_datetimes.mli b/ocaml/idl/test_datetimes.mli new file mode 100644 index 00000000000..e69de29bb2d From 0f9b1dfe1b029aba22e4b771ca172a714ea23770 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 17 Jul 2024 14:02:58 +0100 Subject: [PATCH 336/341] datamodel: document how timezone-less datetimes are handled Signed-off-by: Pau Ruiz Safont --- ocaml/idl/datamodel.ml | 41 ++++++++++++++++++++++++++++++------- ocaml/idl/datamodel_host.ml | 4 +++- ocaml/idl/datamodel_vm.ml | 6 +++++- 3 files changed, 42 insertions(+), 9 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 24b2e597057..737ecc53b0f 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -3894,7 +3894,9 @@ module VDI = struct ; { param_type= DateTime ; param_name= "snapshot_time" - ; param_doc= "Storage-specific config" + ; param_doc= + "Storage-specific config. When the timezone is missing, UTC is \ + assumed" ; param_release= tampa_release ; param_default= Some (VDateTime Date.epoch) } @@ -4088,7 +4090,11 @@ module VDI = struct ~params: [ (Ref _vdi, "self", "The VDI to modify") - ; (DateTime, "value", "The snapshot time of this VDI.") + ; ( DateTime + , "value" + , "The snapshot time of this VDI. When the timezone is missing, UTC \ + is assumed" + ) ] ~flags:[`Session] ~doc:"Sets the snapshot time of this VDI." ~hide_from_docs:true ~allowed_roles:_R_LOCAL_ROOT_ONLY () @@ -5510,7 +5516,11 @@ module VMPP = struct ~params: [ (Ref _vmpp, "self", "The protection policy") - ; (DateTime, "value", "the value to set") + ; ( DateTime + , "value" + , "When was the last backup was done. 
When the timezone is missing, \ + UTC is assumed" + ) ] () @@ -5520,7 +5530,11 @@ module VMPP = struct ~params: [ (Ref _vmpp, "self", "The protection policy") - ; (DateTime, "value", "the value to set") + ; ( DateTime + , "value" + , "When was the last archive was done. When the timezone is missing, \ + UTC is assumed" + ) ] () @@ -5781,7 +5795,11 @@ module VMSS = struct ~params: [ (Ref _vmss, "self", "The snapshot schedule") - ; (DateTime, "value", "the value to set") + ; ( DateTime + , "value" + , "When was the schedule was last run. When a timezone is missing, \ + UTC is assumed" + ) ] () @@ -6310,7 +6328,10 @@ module Message = struct [ (cls, "cls", "The class of object") ; (String, "obj_uuid", "The uuid of the object") - ; (DateTime, "since", "The cutoff time") + ; ( DateTime + , "since" + , "The cutoff time. When the timezone is missing, UTC is assumed" + ) ] ~flags:[`Session] ~result:(Map (Ref _message, Record _message), "The relevant messages") @@ -6318,7 +6339,13 @@ module Message = struct let get_since = call ~name:"get_since" ~in_product_since:rel_orlando - ~params:[(DateTime, "since", "The cutoff time")] + ~params: + [ + ( DateTime + , "since" + , "The cutoff time. When the timezone is missing, UTC is assumed" + ) + ] ~flags:[`Session] ~result:(Map (Ref _message, Record _message), "The relevant messages") ~allowed_roles:_R_READ_ONLY () diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index 08bef5570c7..d48470f3a71 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -935,7 +935,9 @@ let create_params = ; { param_type= DateTime ; param_name= "last_software_update" - ; param_doc= "Date and time when the last software update was applied." + ; param_doc= + "Date and time when the last software update was applied. 
When the \ + timezone is missing, UTC is assumed" ; param_release= dundee_release ; param_default= Some (VDateTime Date.epoch) } diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index 28d77f8ae67..b1b4609f4fb 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -339,7 +339,11 @@ let update_snapshot_metadata = [ (Ref _vm, "vm", "The VM to update") ; (Ref _vm, "snapshot_of", "") - ; (DateTime, "snapshot_time", "") + ; ( DateTime + , "snapshot_time" + , "The timestamp the snapshot was taken. When a timezone is missing, \ + UTC is assumed" + ) ; (String, "transportable_snapshot_id", "") ] ~allowed_roles:_R_POOL_OP () From 96195f7e80fc43302aaa3b69be5c41dd7962e4f3 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 17 Jul 2024 14:30:46 +0100 Subject: [PATCH 337/341] clock: Add timezone to all parsed dates Previously the dates without timezone were assumed to use UTC when being converted to unix time, but the datatype maintained the lack of timezone when being printed. This means that the problematic timezoneless timestamps were kept and could be produced by the hosts. Since the dates are assumed to be UTC, add the timezone when they are parsed for the first time. Now their timezone is displayed at all times. The output of Localtime is kept without a timezone because clients are not prepared to accept arbitrary timezones in dates. 
(Both the SDK and XO(Lite)) Signed-off-by: Pau Ruiz Safont --- ocaml/libs/clock/date.ml | 4 +-- ocaml/libs/clock/test_date.ml | 58 +++++++++++++++++++++++++++++------ 2 files changed, 51 insertions(+), 11 deletions(-) diff --git a/ocaml/libs/clock/date.ml b/ocaml/libs/clock/date.ml index 24c7df1a21f..c668b0c1fb3 100644 --- a/ocaml/libs/clock/date.ml +++ b/ocaml/libs/clock/date.ml @@ -58,8 +58,8 @@ let best_effort_iso8601_to_rfc3339 x = in match tz with | None | Some "" -> - (* the caller didn't specify a tz, use the Unqualified Local Time *) - Printf.sprintf "%s-00:00" x + (* the caller didn't specify a tz, assume Coordinatel Universal Time *) + Printf.sprintf "%sZ" x | Some _ -> x diff --git a/ocaml/libs/clock/test_date.ml b/ocaml/libs/clock/test_date.ml index 722bdbfd2ad..9318f44af54 100644 --- a/ocaml/libs/clock/test_date.ml +++ b/ocaml/libs/clock/test_date.ml @@ -10,6 +10,28 @@ let dash_time_str = "2020-04-07T08:28:32Z" let no_dash_utc_time_str = "20200407T08:28:32Z" +let best_effort_iso8601_to_rfc3339 x = + let x = + try + Scanf.sscanf x "%04d%02d%02dT%s" (fun y mon d rest -> + Printf.sprintf "%04d-%02d-%02dT%s" y mon d rest + ) + with _ -> x + in + let tz = + try + Scanf.sscanf x "%04d-%02d-%02dT%02d:%02d:%02d%s" (fun _ _ _ _ _ _ tz -> + Some tz + ) + with _ -> None + in + match tz with + | None | Some "" -> + (* the caller didn't specify a tz, use the Unqualified Local Time *) + Printf.sprintf "%s-00:00" x + | Some _ -> + x + let tests = let test_of_unix_time_invertible () = let non_int_time = 1586245987.70200706 in @@ -55,6 +77,21 @@ let tests = check_string "to_rfc3339 is backwards compatible" no_dash_utc_time_str (dash_time_str |> of_iso8601 |> to_rfc3339) in + let test_localtime () = + let time = localtime () in + match + time + |> to_rfc3339 + |> best_effort_iso8601_to_rfc3339 + |> Ptime.of_rfc3339 + |> Ptime.rfc3339_error_to_msg + with + | Ok (_, tz, _) -> + Alcotest.(check @@ option int) + "localtime generates a timestamp without timezone" None tz + | 
Error (`Msg msg) -> + Alcotest.failf "Unexpected error: %s" msg + in let test_localtime_string () = let[@warning "-8"] (Ok (t, _, _)) = Ptime.of_rfc3339 "2020-04-07T09:01:28Z" @@ -86,17 +123,19 @@ let tests = (String.contains localtime_string 'Z') in let test_xsi894 () = + let canonical = "20201210T17:19:20Z" in let missing_tz_no_dash = "20201210T17:19:20" in let missing_tz_dash = "2020-12-10T17:19:20" in - check_string "can process missing tz no dash" missing_tz_no_dash + check_string + "Timestamp without timezones nor dashes is accepted, gets converted to \ + UTC" + canonical (missing_tz_no_dash |> of_iso8601 |> to_rfc3339) ; - check_string "can process missing tz with dashes, but return without dashes" - missing_tz_no_dash - (missing_tz_dash |> of_iso8601 |> to_rfc3339) ; - let localtime' = localtime () in - check_string "to_rfc3339 inverts of_iso8601 for localtime" - (localtime' |> to_rfc3339) - (localtime' |> to_rfc3339 |> of_iso8601 |> to_rfc3339) + check_string + "Timestamp without timezones, and dashes is accepted, gets converted to \ + UTC" + canonical + (missing_tz_dash |> of_iso8601 |> to_rfc3339) in let test_email_date (unix_timestamp, expected) = let formatted = of_unix_time unix_timestamp |> to_rfc822 in @@ -135,9 +174,10 @@ let tests = , `Quick , test_to_rfc3339_backwards_compatibility ) + ; ("localtime is printed without timezone", `Quick, test_localtime) ; ("test_localtime_string", `Quick, test_localtime_string) ; ("test_ca342171", `Quick, test_ca342171) - ; ("test_xsi894", `Quick, test_xsi894) + ; ("Parsing datetimes without timezones", `Quick, test_xsi894) ; ( "Date w/o timezone to POSIX time conversion" , `Quick , test_no_timezone_to_unix From 57f657fd85771d901b18658c561f8a0d592f1f0b Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 16 Sep 2024 15:14:00 +0100 Subject: [PATCH 338/341] Specify OCaml version for ocamlformat's output OCamlformat will correctly format new features only when it's aware the OCaml version it's outputting 
provides them. This allows to improve formatting for let-punning, punned labelled arguments, and more: https://github.com/ocaml-ppx/ocamlformat/blob/main/CHANGES.md#added-8 Signed-off-by: Andrii Sultanov --- .ocamlformat | 1 + 1 file changed, 1 insertion(+) diff --git a/.ocamlformat b/.ocamlformat index f86522707f6..77d9adfc386 100644 --- a/.ocamlformat +++ b/.ocamlformat @@ -7,3 +7,4 @@ break-separators=before break-infix=fit-or-vertical break-infix-before-func=false sequence-blank-line=preserve-one +ocaml-version=4.14 From 959ebf64a7c1bcfd87f126e471ba51659dbc4428 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 16 Sep 2024 17:41:27 +0100 Subject: [PATCH 339/341] CP-50615: increase max_spans MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit max-spans=1000 is small for a VM.start, now that we have more instrumentation. With the other optimizations in this series of commits it should now be safe to increase the number of spans, we no longer perform O(n^2) operations on them. Signed-off-by: Edwin Török --- ocaml/xapi/xapi_globs.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 56fbce47edd..cbaa7430e88 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1019,7 +1019,7 @@ let trace_log_dir = ref "/var/log/dt/zipkinv2/json" let export_interval = ref 30. 
-let max_spans = ref 1000 +let max_spans = ref 10000 let max_traces = ref 10000 From 9637ebfeb1007164d5884f6fd631fe6142d9b4a5 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 17 Sep 2024 10:02:40 +0100 Subject: [PATCH 340/341] opam: declare missing dependencies clock now uses xapi-log, so it needs to have the dependency declared in opam Signed-off-by: Pau Ruiz Safont --- clock.opam | 1 + dune-project | 1 + 2 files changed, 2 insertions(+) diff --git a/clock.opam b/clock.opam index 45b4fd162c2..73192316295 100644 --- a/clock.opam +++ b/clock.opam @@ -13,6 +13,7 @@ depends: [ "astring" "mtime" "ptime" + "xapi-log" {= version} "qcheck-core" {with-test} "qcheck-alcotest" {with-test} "odoc" {with-doc} diff --git a/dune-project b/dune-project index 88080ce624c..94a885046a7 100644 --- a/dune-project +++ b/dune-project @@ -29,6 +29,7 @@ astring mtime ptime + (xapi-log (= :version)) (qcheck-core :with-test) (qcheck-alcotest :with-test) ) From 807990b0c3c7d0e791472059193e6a874c236d6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 18 Sep 2024 10:50:46 +0100 Subject: [PATCH 341/341] [ci]: make concurrency group more unique MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We notice that required CI checks cancel themselves even when attempting to merge a single PR at a time. That is probably because there are CI jobs run on both 'push' and 'merge_group'. Try to make the concurrency group more unique by adding the github event name to the group key. 
Signed-off-by: Edwin Török --- .github/workflows/main.yml | 2 +- .github/workflows/other.yml | 2 +- .github/workflows/shellcheck.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f3f7e00a69f..580b27f6288 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -9,7 +9,7 @@ on: merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + group: ${{ github.workflow }}-${{github.event_name}}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml index 52c73729594..d6ad9c849a6 100644 --- a/.github/workflows/other.yml +++ b/.github/workflows/other.yml @@ -9,7 +9,7 @@ on: merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + group: ${{ github.workflow }}-${{github.event_name}}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index 8be332ada23..b078eaba549 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -5,7 +5,7 @@ on: merge_group: concurrency: # On new push, cancel old workflows from the same PR, branch or tag: - group: sc-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + group: sc-${{ github.workflow }}-${{github.event_name}}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: