From 4ce5a101a5fdd4f3dc21d3d07f24b7c1a4869284 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Thu, 8 Oct 2020 14:49:22 +0200 Subject: [PATCH 01/11] tests: Add 'loadtest' test tag Supposed to be used for heavy load environment preparation or for running load tests in general. Should not be included in regular test runs. --- src/tests/dbus-tests/run_tests.py | 6 +++++- src/tests/dbus-tests/udiskstestcase.py | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/tests/dbus-tests/run_tests.py b/src/tests/dbus-tests/run_tests.py index 3495e12314..653ab889a7 100755 --- a/src/tests/dbus-tests/run_tests.py +++ b/src/tests/dbus-tests/run_tests.py @@ -184,6 +184,8 @@ def _get_test_tags(test): tags.add(udiskstestcase.TestTags.NOSTORAGE) if getattr(test_fn, "extradeps", False) or getattr(test_fn.__self__, "extradeps", False): tags.add(udiskstestcase.TestTags.EXTRADEPS) + if getattr(test_fn, "loadtest", False) or getattr(test_fn.__self__, "loadtest", False): + tags.add(udiskstestcase.TestTags.LOADTEST) tags.add(udiskstestcase.TestTags.ALL) @@ -308,13 +310,15 @@ def parse_args(): print('Unknown tag(s) specified:', ', '.join(args.exclude_tags - all_tags), file=sys.stderr) sys.exit(1) - # for backwards compatibility we want to exclude unsafe and unstable by default + # for backwards compatibility we want to exclude unsafe, unstable and loadtests by default if not 'JENKINS_HOME' in os.environ and not (udiskstestcase.TestTags.UNSAFE.value in args.include_tags or udiskstestcase.TestTags.ALL.value in args.include_tags): args.exclude_tags.add(udiskstestcase.TestTags.UNSAFE.value) if not (udiskstestcase.TestTags.UNSTABLE.value in args.include_tags or udiskstestcase.TestTags.ALL.value in args.include_tags): args.exclude_tags.add(udiskstestcase.TestTags.UNSTABLE.value) + if not (udiskstestcase.TestTags.LOADTEST.value in args.include_tags): + args.exclude_tags.add(udiskstestcase.TestTags.LOADTEST.value) return args diff --git a/src/tests/dbus-tests/udiskstestcase.py b/src/tests/dbus-tests/udiskstestcase.py index cc3c09b3e5..8809d1312a 100644 --- a/src/tests/dbus-tests/udiskstestcase.py +++ b/src/tests/dbus-tests/udiskstestcase.py @@ -656,6 +656,7 @@ class TestTags(Enum): UNSAFE = "unsafe" # tests that change system configuration NOSTORAGE = "nostorage" # tests that don't work with storage EXTRADEPS = "extradeps" # tests that require special configuration and/or device to run + LOADTEST = "loadtest" # tests used to prepare heavy load environment @classmethod def get_tags(cls): @@ -678,6 +679,7 @@ def decorator(func): func.unsafe = TestTags.UNSAFE in tags func.nostorage = TestTags.NOSTORAGE in tags func.extradeps = TestTags.EXTRADEPS in tags + func.loadtest = TestTags.LOADTEST in tags return func From 1910d5c8fa5b853ccef3f229784d8f853569dc36 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Thu, 8 Oct 2020 15:51:59 +0200 Subject: [PATCH 02/11] test: Add LVM snapshots load test This creates 1000 LVM snapshots over a persistent image file attached as loop device. Splitting the test case into "setup" and "teardown" parts allows debugging on such heavy environment. 
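Roughly, the shape of the split (an illustrative sketch only; the real class in
test_20_LVM.py derives from UDisksLVMTestBase and performs the actual loop
device, VG, thin pool and snapshot work):

    import udiskstestcase

    class ExampleLVMLoadTest(udiskstestcase.UdisksTestCase):   # base class name is illustrative

        @udiskstestcase.tag_test(udiskstestcase.TestTags.LOADTEST)
        def test_01_setup(self):
            # build the heavy environment: loop device, VG, thin pool, snapshots
            pass

        @udiskstestcase.tag_test(udiskstestcase.TestTags.LOADTEST)
        def test_02_teardown(self):
            # run (or skip) separately so the environment can be kept
            # around between runs for debugging
            pass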
--- src/tests/dbus-tests/test_20_LVM.py | 91 +++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/src/tests/dbus-tests/test_20_LVM.py b/src/tests/dbus-tests/test_20_LVM.py index 43790e8e77..4c9b3df999 100644 --- a/src/tests/dbus-tests/test_20_LVM.py +++ b/src/tests/dbus-tests/test_20_LVM.py @@ -6,6 +6,7 @@ from distutils.version import LooseVersion +import safe_dbus import udiskstestcase import gi @@ -818,3 +819,93 @@ def test_reformat_inactive_vg_locked(self): fstab = self.read_file('/etc/fstab') self.assertIn(name, fstab) self.assertIn(self.fs_uuid, fstab) + + + +class UdisksLVMLoadTest(UDisksLVMTestBase): + '''Load tests with heavy LVM presence''' + + IMG_FILE = '/tmp/udisks-lvm-load-test.img' + NUM_SNAPS = 1000 + + def find_loop_for_backing_file(self, backing_file): + ''' Finds a loop object for given backing file and returns its object path ''' + + objects = safe_dbus.call_sync(self.iface_prefix, + self.path_prefix, + 'org.freedesktop.DBus.ObjectManager', + 'GetManagedObjects', + None) + + loops = {k: v for (k, v) in objects[0].items() if '/block_devices/loop' in k} + for obj_path, properties in loops.items(): + if self.iface_prefix + '.Loop' in properties and \ + properties[self.iface_prefix + '.Loop']['BackingFile'] == self.str_to_ay(backing_file): + return obj_path + + return None + + @udiskstestcase.tag_test(udiskstestcase.TestTags.LOADTEST) + def test_01_setup_1000_snapshots(self): + ''' Create 1000 snapshots on a thin provisioned LV ''' + + # create persistent sparse file that will hold the LVM structure across test runs + self.assertFalse(os.path.exists(self.IMG_FILE)) + ret, _out = self.run_command('truncate -s 10G %s' % self.IMG_FILE) + self.assertEqual(ret, 0) + + manager = self.get_object('/Manager') + with open(self.IMG_FILE, "r+b") as loop_file: + fd = loop_file.fileno() + loop_obj_path = manager.LoopSetup(fd, self.no_options, + dbus_interface=self.iface_prefix + '.Manager') + + self.assertTrue(loop_obj_path) + self.assertTrue(loop_obj_path.startswith(self.path_prefix)) + + loop_obj = self.bus.get_object(self.iface_prefix, loop_obj_path) + self.assertIsNotNone(loop_obj) + device_file = self.ay_to_str(self.get_property_raw(loop_obj, '.Block', 'Device')) + self.assertTrue(device_file.startswith('/dev/loop')) + + # create PV, VG and a thin pool + ret, _out = self.run_command('pvcreate %s' % device_file) + self.assertEqual(ret, 0) + ret, _out = self.run_command('vgcreate udskthnvg %s' % device_file) + self.assertEqual(ret, 0) + ret, _out = self.run_command('lvcreate -n udskthnpl0 -L 8G udskthnvg') + self.assertEqual(ret, 0) + ret, _out = self.run_command('lvcreate -n udskthnpl0m -L 850M udskthnvg') + self.assertEqual(ret, 0) + ret, _out = self.run_command('lvconvert --type thin-pool --poolmetadata udskthnvg/udskthnpl0m udskthnvg/udskthnpl0 --yes') + self.assertEqual(ret, 0) + ret, _out = self.run_command('lvcreate -n udskthin0 -V 1T --thinpool udskthnpl0 udskthnvg') + self.assertEqual(ret, 0) + + # create snapshots + for i in range(self.NUM_SNAPS): + ret, _out = self.run_command('lvcreate -n udskthin0s%.4d --snapshot udskthnvg/udskthin0' % i) + self.assertEqual(ret, 0) + ret, _out = self.run_command('lvchange -ay -K udskthnvg/udskthin0s%.4d' % i) + self.assertEqual(ret, 0) + + + @udiskstestcase.tag_test(udiskstestcase.TestTags.LOADTEST) + def test_02_teardown_1000_snapshots(self): + ''' Teardown previously created snapshots ''' + + self.assertTrue(os.path.exists(self.IMG_FILE)) + + # find existing loop object that matches the backing file + 
loop_obj_path = self.find_loop_for_backing_file(self.IMG_FILE) + self.assertIsNotNone(loop_obj_path) + + # remove all the LVM stuff at once + self.run_command('vgremove udskthnvg --yes') + + # detach and remove the loop device + loop_obj = self.get_object(loop_obj_path) + loop_obj.Delete(self.no_options, + dbus_interface=self.iface_prefix + '.Loop') + + os.remove(self.IMG_FILE) From 677f82d7aa59dafaf4cc58fd3163b870f807fd4e Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Fri, 9 Oct 2020 17:56:26 +0200 Subject: [PATCH 03/11] udiskslinuxprovider: Add flag for a secondary coldplug Just like a primary coldplug that is run upon daemon startup this secondary coldplug indicates modules a secondary initialization over existing drive and block objects. Upon activating a module it's desirable to attach extra module interfaces over existing objects, perform additional probing and expose all available information in a synchronous fashion so that upon retuning from the org.freedesktop.UDisks2.Manager.EnableModule() method call all exported objects and their properties are up-to-date. --- doc/udisks2-sections.txt.daemon.sections.in | 1 + src/udiskslinuxprovider.c | 25 +++++++++++++++++++-- src/udiskslinuxprovider.h | 1 + 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/doc/udisks2-sections.txt.daemon.sections.in b/doc/udisks2-sections.txt.daemon.sections.in index 26c3c2cdff..856c581e5e 100644 --- a/doc/udisks2-sections.txt.daemon.sections.in +++ b/doc/udisks2-sections.txt.daemon.sections.in @@ -196,6 +196,7 @@ UDisksLinuxProvider udisks_linux_provider_new udisks_linux_provider_get_udev_client udisks_linux_provider_get_coldplug +udisks_linux_provider_get_modules_coldplug UDISKS_TYPE_LINUX_PROVIDER UDISKS_LINUX_PROVIDER diff --git a/src/udiskslinuxprovider.c b/src/udiskslinuxprovider.c index c1447bc765..1ee81b3fdf 100644 --- a/src/udiskslinuxprovider.c +++ b/src/udiskslinuxprovider.c @@ -90,6 +90,7 @@ struct _UDisksLinuxProvider /* set to TRUE only in the coldplug phase */ gboolean coldplug; + gboolean modules_coldplug; guint housekeeping_timeout; guint64 housekeeping_last; @@ -576,11 +577,13 @@ ensure_modules (UDisksLinuxProvider *provider) } /* Perform coldplug */ - udisks_debug ("Performing coldplug..."); + udisks_debug ("Performing secondary/modules coldplug..."); udisks_devices = get_udisks_devices (provider); + provider->modules_coldplug = TRUE; do_coldplug (provider, udisks_devices); + provider->modules_coldplug = FALSE; g_list_free_full (udisks_devices, g_object_unref); - udisks_debug ("Coldplug complete"); + udisks_debug ("Secondary/modules coldplug complete"); } /* @@ -851,6 +854,24 @@ udisks_linux_provider_get_coldplug (UDisksLinuxProvider *provider) return provider->coldplug; } +/** + * udisks_linux_provider_get_modules_coldplug: + * @provider: A #UDisksLinuxProvider. + * + * Gets whether @provider is in the secondary coldplug phase as a result + * of module(s) being activated. This "modules coldplug" phase is intended + * for synchronous additional module interfaces initialization over + * an already initialized (coldplugged) base drive or block object. + * + * Returns: %TRUE if in the secondary coldplug phase, %FALSE otherwise. 
+ **/ +gboolean +udisks_linux_provider_get_modules_coldplug (UDisksLinuxProvider *provider) +{ + g_return_val_if_fail (UDISKS_IS_LINUX_PROVIDER (provider), FALSE); + return provider->modules_coldplug; +} + /* ---------------------------------------------------------------------------------------------------- */ static void diff --git a/src/udiskslinuxprovider.h b/src/udiskslinuxprovider.h index 9980f0bc0c..f190a349c1 100644 --- a/src/udiskslinuxprovider.h +++ b/src/udiskslinuxprovider.h @@ -34,6 +34,7 @@ GType udisks_linux_provider_get_type (void) G_GNUC_CONST UDisksLinuxProvider *udisks_linux_provider_new (UDisksDaemon *daemon); GUdevClient *udisks_linux_provider_get_udev_client (UDisksLinuxProvider *provider); gboolean udisks_linux_provider_get_coldplug (UDisksLinuxProvider *provider); +gboolean udisks_linux_provider_get_modules_coldplug (UDisksLinuxProvider *provider); G_END_DECLS From f53a6cfc86caaf465e2ee9c59c506ad21497d382 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Fri, 9 Oct 2020 18:09:24 +0200 Subject: [PATCH 04/11] lvm2: Make use of the secondary coldplug flag It is possible to distinguish a coldplug phase now so remove the dirty workaround. This will change timing of object properties validity and availability of additional objects at the expense of longer coldplug phase. --- modules/lvm2/udiskslinuxmodulelvm2.c | 32 +++++++++++++++++----------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/modules/lvm2/udiskslinuxmodulelvm2.c b/modules/lvm2/udiskslinuxmodulelvm2.c index 8e1ea13aec..b95b13057f 100644 --- a/modules/lvm2/udiskslinuxmodulelvm2.c +++ b/modules/lvm2/udiskslinuxmodulelvm2.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -58,7 +59,6 @@ struct _UDisksLinuxModuleLVM2 { GHashTable *name_to_volume_group; gint delayed_update_id; - gboolean coldplug_done; }; typedef struct _UDisksLinuxModuleLVM2Class UDisksLinuxModuleLVM2Class; @@ -85,7 +85,6 @@ udisks_linux_module_lvm2_constructed (GObject *object) UDisksLinuxModuleLVM2 *module = UDISKS_LINUX_MODULE_LVM2 (object); module->name_to_volume_group = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify) g_object_unref); - module->coldplug_done = FALSE; if (G_OBJECT_CLASS (udisks_linux_module_lvm2_parent_class)->constructed) G_OBJECT_CLASS (udisks_linux_module_lvm2_parent_class)->constructed (object); @@ -299,7 +298,7 @@ lvm_update_vgs (GObject *source_obj, } static void -lvm_update (UDisksLinuxModuleLVM2 *module) +lvm_update (UDisksLinuxModuleLVM2 *module, gboolean coldplug) { GTask *task; @@ -310,7 +309,11 @@ lvm_update (UDisksLinuxModuleLVM2 *module) NULL /* callback_data */); /* holds a reference to 'task' until it is finished */ - g_task_run_in_thread (task, (GTaskThreadFunc) vgs_task_func); + if (coldplug) + g_task_run_in_thread_sync (task, (GTaskThreadFunc) vgs_task_func); + else + g_task_run_in_thread (task, (GTaskThreadFunc) vgs_task_func); + g_object_unref (task); } @@ -319,7 +322,7 @@ delayed_lvm_update (gpointer user_data) { UDisksLinuxModuleLVM2 *module = UDISKS_LINUX_MODULE_LVM2 (user_data); - lvm_update (module); + lvm_update (module, FALSE); module->delayed_update_id = 0; return FALSE; @@ -328,18 +331,21 @@ delayed_lvm_update (gpointer user_data) static void trigger_delayed_lvm_update (UDisksLinuxModuleLVM2 *module) { + UDisksDaemon *daemon; + UDisksLinuxProvider *provider; + if (module->delayed_update_id > 0) return; - if (! 
module->coldplug_done) + daemon = udisks_module_get_daemon (UDISKS_MODULE (module)); + provider = udisks_daemon_get_linux_provider (daemon); + + if (udisks_linux_provider_get_coldplug (provider) || + udisks_linux_provider_get_modules_coldplug (provider)) { - /* Update immediately when doing coldplug, i.e. when lvm2 module has just - * been activated. This is not 100% effective as this affects only the - * first request but from the plugin nature we don't know whether - * coldplugging has been finished or not. Might be subject to change in - * the future. */ - module->coldplug_done = TRUE; - lvm_update (module); + /* Update immediately in a synchronous fashion when doing coldplug, + * i.e. when lvm2 module has just been activated. */ + lvm_update (module, TRUE); } else { From e9835623badb6a530799c16e4b4edaba2985a5b9 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Mon, 12 Oct 2020 16:48:29 +0200 Subject: [PATCH 05/11] udiskslinuxdevice: Save timestamp of a uevent for the device As long as GUdevDevice doesn't carry timestamp of the reported uevent, let's use system monotonic time and bind a timestamp to it as soon as possible. --- src/udiskslinuxdevice.c | 6 +++++- src/udiskslinuxdevice.h | 4 +++- src/udiskslinuxprovider.c | 9 +++++++-- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/udiskslinuxdevice.c b/src/udiskslinuxdevice.c index 9dcf5f4717..e83e69d44f 100644 --- a/src/udiskslinuxdevice.c +++ b/src/udiskslinuxdevice.c @@ -97,6 +97,7 @@ static gboolean probe_ata (UDisksLinuxDevice *device, /** * udisks_linux_device_new_sync: * @udev_device: A #GUdevDevice. + * @timestamp: Monotonic time of the @udev_device first appearance. * * Creates a new #UDisksLinuxDevice from @udev_device which includes * probing the device for more information, if applicable. @@ -107,15 +108,18 @@ static gboolean probe_ata (UDisksLinuxDevice *device, * Returns: A #UDisksLinuxDevice. 
*/ UDisksLinuxDevice * -udisks_linux_device_new_sync (GUdevDevice *udev_device) +udisks_linux_device_new_sync (GUdevDevice *udev_device, + gint64 timestamp) { UDisksLinuxDevice *device; GError *error = NULL; g_return_val_if_fail (G_UDEV_IS_DEVICE (udev_device), NULL); + g_return_val_if_fail (timestamp > 0, NULL); device = g_object_new (UDISKS_TYPE_LINUX_DEVICE, NULL); device->udev_device = g_object_ref (udev_device); + device->timestamp = timestamp; /* No point in probing on remove events */ if (!(g_strcmp0 (g_udev_device_get_action (udev_device), "remove") == 0)) diff --git a/src/udiskslinuxdevice.h b/src/udiskslinuxdevice.h index 38d9636118..c29765469b 100644 --- a/src/udiskslinuxdevice.h +++ b/src/udiskslinuxdevice.h @@ -48,10 +48,12 @@ struct _UDisksLinuxDevice GUdevDevice *udev_device; guchar *ata_identify_device_data; guchar *ata_identify_packet_device_data; + gint64 timestamp; }; GType udisks_linux_device_get_type (void) G_GNUC_CONST; -UDisksLinuxDevice *udisks_linux_device_new_sync (GUdevDevice *udev_device); +UDisksLinuxDevice *udisks_linux_device_new_sync (GUdevDevice *udev_device, + gint64 timestamp); gboolean udisks_linux_device_reprobe_sync (UDisksLinuxDevice *device, GCancellable *cancellable, GError **error); diff --git a/src/udiskslinuxprovider.c b/src/udiskslinuxprovider.c index 1ee81b3fdf..ad27e3befa 100644 --- a/src/udiskslinuxprovider.c +++ b/src/udiskslinuxprovider.c @@ -223,6 +223,7 @@ typedef struct UDisksLinuxProvider *provider; GUdevDevice *udev_device; UDisksLinuxDevice *udisks_device; + gint64 timestamp; } ProbeRequest; static void @@ -291,7 +292,8 @@ probe_request_thread_func (gpointer user_data) } /* probe the device - this may take a while */ - request->udisks_device = udisks_linux_device_new_sync (request->udev_device); + request->udisks_device = udisks_linux_device_new_sync (request->udev_device, + request->timestamp); /* now that we've probed the device, post the request back to the main thread */ g_idle_add (on_idle_with_probed_uevent, request); @@ -314,6 +316,7 @@ on_uevent (GUdevClient *client, ProbeRequest *request; request = g_slice_new0 (ProbeRequest); + request->timestamp = g_get_monotonic_time (); request->provider = g_object_ref (provider); request->udev_device = g_object_ref (device); @@ -484,7 +487,9 @@ get_udisks_devices (UDisksLinuxProvider *provider) GList *devices; GList *udisks_devices; GList *l; + gint64 timestamp; + timestamp = g_get_monotonic_time (); devices = g_udev_client_query_by_subsystem (provider->gudev_client, "block"); /* make sure we process sda before sdz and sdz before sdaa */ @@ -496,7 +501,7 @@ get_udisks_devices (UDisksLinuxProvider *provider) GUdevDevice *device = G_UDEV_DEVICE (l->data); if (!g_udev_device_get_is_initialized (device)) continue; - udisks_devices = g_list_prepend (udisks_devices, udisks_linux_device_new_sync (device)); + udisks_devices = g_list_prepend (udisks_devices, udisks_linux_device_new_sync (device, timestamp)); } udisks_devices = g_list_reverse (udisks_devices); g_list_free_full (devices, g_object_unref); From f124da4f15e953427421abb72af417e757fdc12a Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Mon, 12 Oct 2020 17:14:45 +0200 Subject: [PATCH 06/11] udiskslinuxprovider: Store timestamp of a last uevent received Betting on natural atomicity of the 64-bit value, may need explicit measures to be added around (TODO). 
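If the TODO turns out to matter, one straightforward option is to guard the
field with a dedicated mutex. Illustrative sketch only, not part of this patch;
the helper names below are made up, only last_uevent_timestamp exists in the
code:

    /* statically allocated GMutex needs no g_mutex_init() */
    static GMutex uevent_timestamp_lock;   /* hypothetical, not in udisks */

    static void
    set_last_uevent_timestamp (UDisksLinuxProvider *provider,
                               gint64               timestamp)
    {
      g_mutex_lock (&uevent_timestamp_lock);
      provider->last_uevent_timestamp = timestamp;
      g_mutex_unlock (&uevent_timestamp_lock);
    }

    static gint64
    get_last_uevent_timestamp (UDisksLinuxProvider *provider)
    {
      gint64 timestamp;

      g_mutex_lock (&uevent_timestamp_lock);
      timestamp = provider->last_uevent_timestamp;
      g_mutex_unlock (&uevent_timestamp_lock);
      return timestamp;
    }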
--- doc/udisks2-sections.txt.daemon.sections.in | 1 + src/udiskslinuxprovider.c | 24 ++++++++++++++++++++- src/udiskslinuxprovider.h | 1 + 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/doc/udisks2-sections.txt.daemon.sections.in b/doc/udisks2-sections.txt.daemon.sections.in index 856c581e5e..1640984b2b 100644 --- a/doc/udisks2-sections.txt.daemon.sections.in +++ b/doc/udisks2-sections.txt.daemon.sections.in @@ -197,6 +197,7 @@ udisks_linux_provider_new udisks_linux_provider_get_udev_client udisks_linux_provider_get_coldplug udisks_linux_provider_get_modules_coldplug +udisks_linux_provider_get_last_uevent UDISKS_TYPE_LINUX_PROVIDER UDISKS_LINUX_PROVIDER diff --git a/src/udiskslinuxprovider.c b/src/udiskslinuxprovider.c index ad27e3befa..d38b7d48fe 100644 --- a/src/udiskslinuxprovider.c +++ b/src/udiskslinuxprovider.c @@ -64,6 +64,7 @@ struct _UDisksLinuxProvider GUdevClient *gudev_client; GAsyncQueue *probe_request_queue; GThread *probe_request_thread; + gint64 last_uevent_timestamp; UDisksObjectSkeleton *manager_object; @@ -314,9 +315,13 @@ on_uevent (GUdevClient *client, { UDisksLinuxProvider *provider = UDISKS_LINUX_PROVIDER (user_data); ProbeRequest *request; + gint64 timestamp; + + timestamp = g_get_monotonic_time (); + provider->last_uevent_timestamp = timestamp; request = g_slice_new0 (ProbeRequest); - request->timestamp = g_get_monotonic_time (); + request->timestamp = timestamp; request->provider = g_object_ref (provider); request->udev_device = g_object_ref (device); @@ -346,6 +351,7 @@ udisks_linux_provider_constructed (GObject *object) /* get ourselves an udev client */ provider->gudev_client = g_udev_client_new (subsystems); + provider->last_uevent_timestamp = g_get_monotonic_time (); g_signal_connect (provider->gudev_client, "uevent", @@ -877,6 +883,22 @@ udisks_linux_provider_get_modules_coldplug (UDisksLinuxProvider *provider) return provider->modules_coldplug; } +/** + * udisks_linux_provider_get_last_uevent: + * @provider: A #UDisksLinuxProvider. + * + * Gets timestamp of a last uevent received. + * + * Returns: monotonic time of a last uevent. + **/ +gint64 +udisks_linux_provider_get_last_uevent (UDisksLinuxProvider *provider) +{ + g_return_val_if_fail (UDISKS_IS_LINUX_PROVIDER (provider), 0); + /* TODO: do we need to assure atomicity? */ + return provider->last_uevent_timestamp; +} + /* ---------------------------------------------------------------------------------------------------- */ static void diff --git a/src/udiskslinuxprovider.h b/src/udiskslinuxprovider.h index f190a349c1..a37e11ad04 100644 --- a/src/udiskslinuxprovider.h +++ b/src/udiskslinuxprovider.h @@ -35,6 +35,7 @@ UDisksLinuxProvider *udisks_linux_provider_new (UDisksDaemon GUdevClient *udisks_linux_provider_get_udev_client (UDisksLinuxProvider *provider); gboolean udisks_linux_provider_get_coldplug (UDisksLinuxProvider *provider); gboolean udisks_linux_provider_get_modules_coldplug (UDisksLinuxProvider *provider); +gint64 udisks_linux_provider_get_last_uevent (UDisksLinuxProvider *provider); G_END_DECLS From ddf857a4375cfbae194fb66296f3f578ce9a86f5 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Wed, 21 Oct 2020 17:03:54 +0200 Subject: [PATCH 07/11] lvm2: Run update sync on coldplugging Updates should be done sync while coldplugging and async updates should be rate limited. This is done by comparing timestamps of (filtered) uevents, keeping the update job limited to a single instance, optionally queueing another update once the previous one finished. 
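Condensed, the decision made in lvm_update() boils down to the following
(illustrative sketch, simplified from the hunks below):

    if (!force_update &&
        udisks_linux_provider_get_last_uevent (provider) <= module->last_update_requested)
      return;    /* no uevent since the last update request, nothing new to pick up */

    module->last_update_requested = timestamp;

    if (module->update_task)
      return;    /* update already running; its completion callback queues
                    another one if the timestamps show further uevents came in */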
We rely on uevents being generated for any change to lvm2 block devices, otherwise this wouldn't work. --- modules/lvm2/udiskslinuxmodulelvm2.c | 119 +++++++++++++++----- modules/lvm2/udiskslinuxvolumegroupobject.c | 12 +- modules/lvm2/udiskslinuxvolumegroupobject.h | 3 +- 3 files changed, 100 insertions(+), 34 deletions(-) diff --git a/modules/lvm2/udiskslinuxmodulelvm2.c b/modules/lvm2/udiskslinuxmodulelvm2.c index b95b13057f..993e7b184f 100644 --- a/modules/lvm2/udiskslinuxmodulelvm2.c +++ b/modules/lvm2/udiskslinuxmodulelvm2.c @@ -58,7 +58,8 @@ struct _UDisksLinuxModuleLVM2 { /* maps from volume group name to UDisksLinuxVolumeGroupObject instances. */ GHashTable *name_to_volume_group; - gint delayed_update_id; + gint64 last_update_requested; + GTask *update_task; }; typedef struct _UDisksLinuxModuleLVM2Class UDisksLinuxModuleLVM2Class; @@ -85,6 +86,7 @@ udisks_linux_module_lvm2_constructed (GObject *object) UDisksLinuxModuleLVM2 *module = UDISKS_LINUX_MODULE_LVM2 (object); module->name_to_volume_group = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify) g_object_unref); + module->last_update_requested = 0; if (G_OBJECT_CLASS (udisks_linux_module_lvm2_parent_class)->constructed) G_OBJECT_CLASS (udisks_linux_module_lvm2_parent_class)->constructed (object); @@ -95,6 +97,8 @@ udisks_linux_module_lvm2_finalize (GObject *object) { UDisksLinuxModuleLVM2 *module = UDISKS_LINUX_MODULE_LVM2 (object); + /* Note: won't be called until module->update_task finishes */ + g_hash_table_unref (module->name_to_volume_group); if (G_OBJECT_CLASS (udisks_linux_module_lvm2_parent_class)->finalize) @@ -201,6 +205,13 @@ udisks_linux_module_lvm2_new_manager (UDisksModule *module) /* ---------------------------------------------------------------------------------------------------- */ +typedef struct { + gint64 task_timestamp; + gboolean sync_task; +} LVMUpdateTaskData; + +static gboolean delayed_lvm_update (gpointer user_data); + static void lvm_update_vgs (GObject *source_obj, GAsyncResult *result, @@ -213,12 +224,21 @@ lvm_update_vgs (GObject *source_obj, GTask *task = G_TASK (result); GError *error = NULL; VGsPVsData *data = g_task_propagate_pointer (task, &error); + LVMUpdateTaskData *task_data = user_data; BDLVMVGdata **vgs, **vgs_p; BDLVMPVdata **pvs, **pvs_p; GHashTableIter vg_name_iter; gpointer key, value; const gchar *vg_name; + gint64 task_timestamp; + gboolean sync_task; + + g_warn_if_fail (task_data != NULL); + + task_timestamp = task_data->task_timestamp; + sync_task = task_data->sync_task; + g_free (task_data); if (! data) { @@ -232,6 +252,10 @@ lvm_update_vgs (GObject *source_obj, /* this should never happen */ udisks_warning ("LVM2 plugin: failure but no error when getting VGs!"); } + g_clear_object (&module->update_task); + /* queue new task if a new uevent has been received during the task processing time */ + if (!sync_task && task_timestamp < module->last_update_requested) + g_idle_add (delayed_lvm_update, module); return; } vgs = data->vgs; @@ -282,7 +306,7 @@ lvm_update_vgs (GObject *source_obj, if (g_strcmp0 ((*pvs_p)->vg_name, vg_name) == 0) vg_pvs = g_slist_prepend (vg_pvs, bd_lvm_pvdata_copy (*pvs_p)); - udisks_linux_volume_group_object_update (group, *vgs_p, vg_pvs); + udisks_linux_volume_group_object_update (group, *vgs_p, vg_pvs, sync_task); } /* UDisksLinuxVolumeGroupObject carries copies of BDLVMPVdata that belong to the VG. 
@@ -295,26 +319,65 @@ lvm_update_vgs (GObject *source_obj, /* only free the containers, the contents were passed further */ g_free (vgs); g_free (pvs); + + /* we hold a reference to the task */ + g_clear_object (&module->update_task); + + /* If this update was sync, it was blocking the main (uevent processing) thread and + * there was no chance the module->last_update_requested timestamp would change. */ + if (!sync_task && task_timestamp < module->last_update_requested) + { + /* Further uevents have been received while the update task was running, + * queue a new update. */ + g_idle_add (delayed_lvm_update, module); + } } static void -lvm_update (UDisksLinuxModuleLVM2 *module, gboolean coldplug) +lvm_update (UDisksLinuxModuleLVM2 *module, gint64 timestamp, gboolean coldplug, gboolean force_update) { - GTask *task; + UDisksDaemon *daemon; + UDisksLinuxProvider *provider; + LVMUpdateTaskData *task_data; + + daemon = udisks_module_get_daemon (UDISKS_MODULE (module)); + provider = udisks_daemon_get_linux_provider (daemon); + + if (!force_update && udisks_linux_provider_get_last_uevent (provider) <= module->last_update_requested) + { + udisks_debug ("lvm2: no uevent received since last update, skipping"); + return; + } + + /* store timestamp of a last update requested */ + module->last_update_requested = timestamp; + if (module->update_task) + { + udisks_debug ("lvm2: update already in progress, will queue another one once finished"); + return; + } + + task_data = g_new0 (LVMUpdateTaskData, 1); + task_data->task_timestamp = module->last_update_requested; /* the callback (lvm_update_vgs) is called in the default main loop (context) */ - task = g_task_new (module, - NULL /* cancellable */, - lvm_update_vgs, - NULL /* callback_data */); + module->update_task = g_task_new (module, + NULL /* cancellable */, + lvm_update_vgs, + task_data /* callback_data */); - /* holds a reference to 'task' until it is finished */ + /* the callback is responsible for releasing the task reference */ if (coldplug) - g_task_run_in_thread_sync (task, (GTaskThreadFunc) vgs_task_func); + { + task_data->sync_task = TRUE; + g_task_run_in_thread_sync (module->update_task, (GTaskThreadFunc) vgs_task_func); + lvm_update_vgs (G_OBJECT (module), G_ASYNC_RESULT (module->update_task), task_data); + } else - g_task_run_in_thread (task, (GTaskThreadFunc) vgs_task_func); - - g_object_unref (task); + { + task_data->sync_task = FALSE; + g_task_run_in_thread (module->update_task, (GTaskThreadFunc) vgs_task_func); + } } static gboolean @@ -322,35 +385,28 @@ delayed_lvm_update (gpointer user_data) { UDisksLinuxModuleLVM2 *module = UDISKS_LINUX_MODULE_LVM2 (user_data); - lvm_update (module, FALSE); - module->delayed_update_id = 0; + udisks_debug ("lvm2: spawning another update due to incoming uevent during last update"); + + /* delayed updates are always async */ + lvm_update (module, g_get_monotonic_time (), FALSE, TRUE); return FALSE; } static void -trigger_delayed_lvm_update (UDisksLinuxModuleLVM2 *module) +trigger_delayed_lvm_update (UDisksLinuxModuleLVM2 *module, gint64 timestamp) { UDisksDaemon *daemon; UDisksLinuxProvider *provider; - - if (module->delayed_update_id > 0) - return; + gboolean coldplug; daemon = udisks_module_get_daemon (UDISKS_MODULE (module)); provider = udisks_daemon_get_linux_provider (daemon); - if (udisks_linux_provider_get_coldplug (provider) || - udisks_linux_provider_get_modules_coldplug (provider)) - { - /* Update immediately in a synchronous fashion when doing coldplug, - * i.e. 
when lvm2 module has just been activated. */ - lvm_update (module, TRUE); - } - else - { - module->delayed_update_id = g_timeout_add (100, delayed_lvm_update, module); - } + coldplug = udisks_linux_provider_get_coldplug (provider) || + udisks_linux_provider_get_modules_coldplug (provider); + + lvm_update (module, timestamp, coldplug, FALSE); } static gboolean @@ -387,6 +443,7 @@ is_recorded_as_physical_volume (UDisksLinuxModuleLVM2 *module, return ret; } +/* should only be called from the main thread */ static GDBusObjectSkeleton ** udisks_linux_module_lvm2_new_object (UDisksModule *module, UDisksLinuxDevice *device) @@ -403,7 +460,7 @@ udisks_linux_module_lvm2_new_object (UDisksModule *module, if (is_logical_volume (device) || has_physical_volume_label (device) || is_recorded_as_physical_volume (UDISKS_LINUX_MODULE_LVM2 (module), device)) - trigger_delayed_lvm_update (UDISKS_LINUX_MODULE_LVM2 (module)); + trigger_delayed_lvm_update (UDISKS_LINUX_MODULE_LVM2 (module), device->timestamp); return NULL; } diff --git a/modules/lvm2/udiskslinuxvolumegroupobject.c b/modules/lvm2/udiskslinuxvolumegroupobject.c index e3bc8dfc0b..ab2fdee698 100644 --- a/modules/lvm2/udiskslinuxvolumegroupobject.c +++ b/modules/lvm2/udiskslinuxvolumegroupobject.c @@ -702,7 +702,7 @@ update_vg (GObject *source_obj, } void -udisks_linux_volume_group_object_update (UDisksLinuxVolumeGroupObject *object, BDLVMVGdata *vg_info, GSList *pvs) +udisks_linux_volume_group_object_update (UDisksLinuxVolumeGroupObject *object, BDLVMVGdata *vg_info, GSList *pvs, gboolean update_sync) { VGUpdateData *data = g_new0 (VGUpdateData, 1); gchar *vg_name = g_strdup (vg_info->name); @@ -716,7 +716,15 @@ udisks_linux_volume_group_object_update (UDisksLinuxVolumeGroupObject *object, B g_task_set_task_data (task, vg_name, g_free); /* holds a reference to 'task' until it is finished */ - g_task_run_in_thread (task, (GTaskThreadFunc) lvs_task_func); + if (update_sync) + { + g_task_run_in_thread_sync (task, (GTaskThreadFunc) lvs_task_func); + update_vg (G_OBJECT (object), G_ASYNC_RESULT (task), data); + } + else + { + g_task_run_in_thread (task, (GTaskThreadFunc) lvs_task_func); + } g_object_unref (task); } diff --git a/modules/lvm2/udiskslinuxvolumegroupobject.h b/modules/lvm2/udiskslinuxvolumegroupobject.h index 01b6e821df..5d583d1428 100644 --- a/modules/lvm2/udiskslinuxvolumegroupobject.h +++ b/modules/lvm2/udiskslinuxvolumegroupobject.h @@ -40,7 +40,8 @@ const gchar *udisks_linux_volume_group_object_get_name ( UDisksLinuxModuleLVM2 *udisks_linux_volume_group_object_get_module (UDisksLinuxVolumeGroupObject *object); void udisks_linux_volume_group_object_update (UDisksLinuxVolumeGroupObject *object, BDLVMVGdata *vginfo, - GSList *pvs); + GSList *pvs, + gboolean update_sync); void udisks_linux_volume_group_object_poll (UDisksLinuxVolumeGroupObject *object); From 94a3c25f7586613d466763330b9464b34b614713 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Wed, 21 Oct 2020 18:31:23 +0200 Subject: [PATCH 08/11] lvm2: Implement 'scalable mode' for large number of LVs in the system For consistency reasons we would like to run updates sync, however with large number of LVs in the system this can take considerable amount of time, blocking the serialized daemon uevent processing. As a workaround this change introduces an arbitrary threshold that will switch updates back to async as was historically observed. 
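The switch itself is driven by a simple logical volume count collected during
every refresh, roughly (illustrative sketch, see the hunks below for the real
code):

    GHashTableIter iter;
    gpointer value;
    guint lv_count = 0;

    g_hash_table_iter_init (&iter, module->name_to_volume_group);
    while (g_hash_table_iter_next (&iter, NULL, &value))
      lv_count += udisks_linux_volume_group_object_get_lv_count (value);

    /* more than LVM2_SCALABLE_MODE_THRESHOLD (100) logical volumes:
     * run further updates asynchronously so that the serialized uevent
     * processing in the daemon is not blocked */
    module->scalable_mode = lv_count > LVM2_SCALABLE_MODE_THRESHOLD;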
--- modules/lvm2/udiskslinuxmodulelvm2.c | 19 ++++++++++++++++++- modules/lvm2/udiskslinuxvolumegroupobject.c | 17 +++++++++++++++++ modules/lvm2/udiskslinuxvolumegroupobject.h | 1 + modules/lvm2/udiskslvm2types.h | 8 ++++++++ 4 files changed, 44 insertions(+), 1 deletion(-) diff --git a/modules/lvm2/udiskslinuxmodulelvm2.c b/modules/lvm2/udiskslinuxmodulelvm2.c index 993e7b184f..563f494904 100644 --- a/modules/lvm2/udiskslinuxmodulelvm2.c +++ b/modules/lvm2/udiskslinuxmodulelvm2.c @@ -60,6 +60,8 @@ struct _UDisksLinuxModuleLVM2 { gint64 last_update_requested; GTask *update_task; + + gboolean scalable_mode; }; typedef struct _UDisksLinuxModuleLVM2Class UDisksLinuxModuleLVM2Class; @@ -87,6 +89,7 @@ udisks_linux_module_lvm2_constructed (GObject *object) module->name_to_volume_group = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify) g_object_unref); module->last_update_requested = 0; + module->scalable_mode = FALSE; if (G_OBJECT_CLASS (udisks_linux_module_lvm2_parent_class)->constructed) G_OBJECT_CLASS (udisks_linux_module_lvm2_parent_class)->constructed (object); @@ -233,6 +236,7 @@ lvm_update_vgs (GObject *source_obj, const gchar *vg_name; gint64 task_timestamp; gboolean sync_task; + guint lv_count = 0; g_warn_if_fail (task_data != NULL); @@ -307,6 +311,19 @@ lvm_update_vgs (GObject *source_obj, vg_pvs = g_slist_prepend (vg_pvs, bd_lvm_pvdata_copy (*pvs_p)); udisks_linux_volume_group_object_update (group, *vgs_p, vg_pvs, sync_task); + lv_count += udisks_linux_volume_group_object_get_lv_count (group); + } + + if (lv_count > LVM2_SCALABLE_MODE_THRESHOLD) + { + if (! module->scalable_mode) + udisks_warning ("lvm2: Total of %u logical volumes detected in the system, switching the scalable mode on.", lv_count); + module->scalable_mode = TRUE; + } + else + { + /* stepping down from scalable mode is currently allowed */ + module->scalable_mode = FALSE; } /* UDisksLinuxVolumeGroupObject carries copies of BDLVMPVdata that belong to the VG. @@ -367,7 +384,7 @@ lvm_update (UDisksLinuxModuleLVM2 *module, gint64 timestamp, gboolean coldplug, task_data /* callback_data */); /* the callback is responsible for releasing the task reference */ - if (coldplug) + if (coldplug || !module->scalable_mode) { task_data->sync_task = TRUE; g_task_run_in_thread_sync (module->update_task, (GTaskThreadFunc) vgs_task_func); diff --git a/modules/lvm2/udiskslinuxvolumegroupobject.c b/modules/lvm2/udiskslinuxvolumegroupobject.c index ab2fdee698..0be2ce9462 100644 --- a/modules/lvm2/udiskslinuxvolumegroupobject.c +++ b/modules/lvm2/udiskslinuxvolumegroupobject.c @@ -904,3 +904,20 @@ udisks_linux_volume_group_object_get_name (UDisksLinuxVolumeGroupObject *object) g_return_val_if_fail (UDISKS_IS_LINUX_VOLUME_GROUP_OBJECT (object), NULL); return object->name; } + +/** + * udisks_linux_volume_group_object_get_lv_count: + * @object: A #UDisksLinuxVolumeGroupObject. + * + * Gets the number of logical volumes the volume group contains. + * + * Returns: The number of logical volumes for the object. 
+ */ +guint +udisks_linux_volume_group_object_get_lv_count (UDisksLinuxVolumeGroupObject *object) +{ + g_return_val_if_fail (UDISKS_IS_LINUX_VOLUME_GROUP_OBJECT (object), 0); + g_assert (object->logical_volumes != NULL); + + return g_hash_table_size (object->logical_volumes); +} diff --git a/modules/lvm2/udiskslinuxvolumegroupobject.h b/modules/lvm2/udiskslinuxvolumegroupobject.h index 5d583d1428..735aeeec02 100644 --- a/modules/lvm2/udiskslinuxvolumegroupobject.h +++ b/modules/lvm2/udiskslinuxvolumegroupobject.h @@ -42,6 +42,7 @@ void udisks_linux_volume_group_object_update ( BDLVMVGdata *vginfo, GSList *pvs, gboolean update_sync); +guint udisks_linux_volume_group_object_get_lv_count (UDisksLinuxVolumeGroupObject *object); void udisks_linux_volume_group_object_poll (UDisksLinuxVolumeGroupObject *object); diff --git a/modules/lvm2/udiskslvm2types.h b/modules/lvm2/udiskslvm2types.h index 7741ded30b..57e3e1bf72 100644 --- a/modules/lvm2/udiskslvm2types.h +++ b/modules/lvm2/udiskslvm2types.h @@ -25,6 +25,14 @@ #define LVM2_MODULE_NAME "lvm2" #define LVM2_POLICY_ACTION_ID "org.freedesktop.udisks2.lvm2.manage-lvm" +/** + * LVM2_SCALABLE_MODE_THRESHOLD: + * + * Number of logical volumes detected in the system needed to + * switch the scalable mode on. + */ +#define LVM2_SCALABLE_MODE_THRESHOLD 100 + struct _UDisksLinuxModuleLVM2; typedef struct _UDisksLinuxModuleLVM2 UDisksLinuxModuleLVM2; From 2238da5a6ae739e48d8c89f4150e763644a15dc7 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Mon, 26 Oct 2020 15:41:25 +0100 Subject: [PATCH 09/11] tests: Remove lingering lvm2 VG entries There's a known bug somewhere between lvm2 and udev, keeping /dev entries around while the block devices are actually gone. This causes problems on subsequent test runs while it seems nothing else breaks down. --- src/tests/dbus-tests/test_20_LVM.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/tests/dbus-tests/test_20_LVM.py b/src/tests/dbus-tests/test_20_LVM.py index 4c9b3df999..8e775a234f 100644 --- a/src/tests/dbus-tests/test_20_LVM.py +++ b/src/tests/dbus-tests/test_20_LVM.py @@ -1,6 +1,7 @@ import dbus import os import re +import shutil import time import unittest @@ -55,6 +56,8 @@ def _remove_vg(self, vg, tear_down=False, ignore_removed=False): vg.Delete(True, options, dbus_interface=self.iface_prefix + '.VolumeGroup') ret, _out = self.run_command('vgs %s' % vgname) self.assertNotEqual(ret, 0) + # remove lingering /dev entries + shutil.rmtree(os.path.join('/dev', vgname), ignore_errors=True) except dbus.exceptions.DBusException as e: if not ignore_removed: raise e From 99bd6dcf064e66b516124b18e0a12bbd2e91e281 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Mon, 26 Oct 2020 15:58:09 +0100 Subject: [PATCH 10/11] udiskslinuxprovider: Reorder uevent processing for modules This is a major change and will need to be followed up to make things right. This essientially reorders the moment the uevent is propagated to modules and may affect availability of information on exposed D-Bus objects. This is all about dependencies, i.e. in case a module gathers information from existing block objects, it needs to know whether that block object has already finished processing the particular uevent or not. This commit defines that order and it appears to be sensible to guarantee the "core" daemon objects and its interfaces are updated by the time udisks_module_new_object() is called. 
That happens as a last step of uevent processing chain which includes block object specific module callbacks for exposing extra D-Bus interfaces (run prior to udisks_module_new_object()). However it may not suit all cases and would be perhaps wise to provide pre- and post- udisks_module_new_object() callbacks for finer granularity and precise definition of the moment of uevent processing. For the moment let's run udisks_module_new_object() as the last in the chain and make modules aware of that. --- src/udiskslinuxprovider.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/udiskslinuxprovider.c b/src/udiskslinuxprovider.c index d38b7d48fe..81422130fd 100644 --- a/src/udiskslinuxprovider.c +++ b/src/udiskslinuxprovider.c @@ -1387,6 +1387,7 @@ handle_block_uevent (UDisksLinuxProvider *provider, handle_block_uevent_for_block (provider, action, device); handle_block_uevent_for_drive (provider, action, device); handle_block_uevent_for_mdraid (provider, action, device); + /* TODO: implement two-phase pre-remove and post-remove modules callback */ handle_block_uevent_for_modules (provider, action, device); } else @@ -1404,10 +1405,11 @@ handle_block_uevent (UDisksLinuxProvider *provider, } else { - handle_block_uevent_for_modules (provider, action, device); handle_block_uevent_for_mdraid (provider, action, device); handle_block_uevent_for_drive (provider, action, device); handle_block_uevent_for_block (provider, action, device); + /* TODO: implement two-phase pre-add and post-add modules callback */ + handle_block_uevent_for_modules (provider, action, device); } } From 4efd89438b4da967a00a65442352384e8b822364 Mon Sep 17 00:00:00 2001 From: Tomas Bzatek Date: Fri, 13 Nov 2020 17:03:49 +0100 Subject: [PATCH 11/11] lvm2: Use udisks_daemon_util_trigger_uevent() This might trigger some critical warnings from the daemon as the VG/LV might not exist. Mostly harmless as until now the return code of open() wasn't actually checked. Subject to further fixes in the future. 
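One possible follow-up, sketched here for illustration only and not part of
this patch, would be to fire the synthetic uevent only when the device node
still exists:

    /* only poke devices that are still present, otherwise the daemon
     * logs critical warnings for VGs/LVs that are already gone */
    if (g_file_test (dev_file, G_FILE_TEST_EXISTS))
      udisks_daemon_util_trigger_uevent (daemon, dev_file);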
--- modules/lvm2/udiskslinuxlogicalvolume.c | 7 ++++++- modules/lvm2/udiskslvm2daemonutil.c | 8 -------- modules/lvm2/udiskslvm2daemonutil.h | 2 -- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/modules/lvm2/udiskslinuxlogicalvolume.c b/modules/lvm2/udiskslinuxlogicalvolume.c index c0f074d858..5c016337ee 100644 --- a/modules/lvm2/udiskslinuxlogicalvolume.c +++ b/modules/lvm2/udiskslinuxlogicalvolume.c @@ -148,12 +148,17 @@ udisks_linux_logical_volume_update (UDisksLinuxLogicalVolume *logical_volume gboolean *needs_polling_ret) { UDisksLogicalVolume *iface; + UDisksLinuxModuleLVM2 *module; + UDisksDaemon *daemon; const char *type; gboolean active; const char *pool_objpath; const char *origin_objpath; guint64 size = 0; + module = udisks_linux_volume_group_object_get_module (group_object); + daemon = udisks_module_get_daemon (UDISKS_MODULE (module)); + iface = UDISKS_LOGICAL_VOLUME (logical_volume); udisks_logical_volume_set_name (iface, lv_info->lv_name); @@ -225,7 +230,7 @@ udisks_linux_logical_volume_update (UDisksLinuxLogicalVolume *logical_volume * * https://www.redhat.com/archives/linux-lvm/2014-January/msg00030.html */ - udisks_daemon_util_lvm2_trigger_udev (dev_file); + udisks_daemon_util_trigger_uevent (daemon, dev_file); logical_volume->needs_udev_hack = FALSE; g_free (dev_file); } diff --git a/modules/lvm2/udiskslvm2daemonutil.c b/modules/lvm2/udiskslvm2daemonutil.c index 3c4afb92e8..ec91f53a3c 100644 --- a/modules/lvm2/udiskslvm2daemonutil.c +++ b/modules/lvm2/udiskslvm2daemonutil.c @@ -263,11 +263,3 @@ udisks_daemon_util_lvm2_name_is_reserved (const gchar *name) } /* -------------------------------------------------------------------------------- */ - -void -udisks_daemon_util_lvm2_trigger_udev (const gchar *device_file) -{ - int fd = open (device_file, O_RDWR); - if (fd >= 0) - close (fd); -} diff --git a/modules/lvm2/udiskslvm2daemonutil.h b/modules/lvm2/udiskslvm2daemonutil.h index 42f4ef20e2..5cc1a69ada 100644 --- a/modules/lvm2/udiskslvm2daemonutil.h +++ b/modules/lvm2/udiskslvm2daemonutil.h @@ -35,8 +35,6 @@ gboolean udisks_daemon_util_lvm2_wipe_block (UDisksDaemon *daemon, gboolean udisks_daemon_util_lvm2_name_is_reserved (const gchar *name); -void udisks_daemon_util_lvm2_trigger_udev (const gchar *device_file); - G_END_DECLS #endif /* __UDISKS_LVM2_DAEMON_UTIL_H__ */