diff --git a/client/operations.go b/client/operations.go
index 8046ef5c3da..1b543bec080 100644
--- a/client/operations.go
+++ b/client/operations.go
@@ -11,6 +11,7 @@ import (
     "github.com/gorilla/websocket"

     "github.com/lxc/incus/shared/api"
+    "github.com/lxc/incus/shared/logger"
 )

 // The Operation type represents an ongoing Incus operation (asynchronous processing).
@@ -120,17 +121,21 @@ func (op *operation) Wait() error {

 // WaitContext lets you wait until the operation reaches a final state with context.Context.
 func (op *operation) WaitContext(ctx context.Context) error {
+    logger.Errorf("stgraber: wait1")
     if op.skipListener {
+        logger.Errorf("stgraber: wait2")
         timeout := -1
         deadline, ok := ctx.Deadline()
         if ok {
             timeout = int(time.Until(deadline).Seconds())
         }

+        logger.Errorf("stgraber: wait3")
         opAPI, _, err := op.r.GetOperationWait(op.ID, timeout)
         if err != nil {
             return err
         }

+        logger.Errorf("stgraber: wait4")
         op.Operation = *opAPI
@@ -138,10 +143,13 @@ func (op *operation) WaitContext(ctx context.Context) error {
             return errors.New(opAPI.Err)
         }

+        logger.Errorf("stgraber: wait5")
         return nil
     }

+    logger.Errorf("stgraber: wait6")
     op.handlerLock.Lock()
+    logger.Errorf("stgraber: wait7")
     // Check if not done already
     if op.StatusCode.IsFinal() {
         if op.Err != "" {
@@ -150,28 +158,34 @@ func (op *operation) WaitContext(ctx context.Context) error {
         }

         op.handlerLock.Unlock()
+        logger.Errorf("stgraber: wait8")
         return nil
     }

+    logger.Errorf("stgraber: wait9")
     op.handlerLock.Unlock()

+    logger.Errorf("stgraber: wait10")
     // Make sure we have a listener setup
     err := op.setupListener()
     if err != nil {
         return err
     }

+    logger.Errorf("stgraber: wait11")
     select {
     case <-ctx.Done():
         return ctx.Err()
     case <-op.chActive:
     }

+    logger.Errorf("stgraber: wait12")
     // We're done, parse the result
     if op.Err != "" {
         return errors.New(op.Err)
     }

+    logger.Errorf("stgraber: wait13")
     return nil
 }
diff --git a/cmd/incusd/api_internal.go b/cmd/incusd/api_internal.go
index c7d4a4e2ab8..31b6a71ecfd 100644
--- a/cmd/incusd/api_internal.go
+++ b/cmd/incusd/api_internal.go
@@ -219,6 +219,7 @@ func internalRefreshImage(d *Daemon, r *http.Request) response.Response {
     err := autoUpdateImages(s.ShutdownCtx, s)
     if err != nil {
+        logger.Errorf("refresh-image: %v", err)
         return response.SmartError(err)
     }

diff --git a/cmd/incusd/images.go b/cmd/incusd/images.go
index eeb546fd302..6a5fede463b 100644
--- a/cmd/incusd/images.go
+++ b/cmd/incusd/images.go
@@ -966,6 +966,7 @@ func imageCreateInPool(s *state.State, info *api.Image, storagePool string) erro
 //  "500":
 //    $ref: "#/responses/InternalServerError"
 func imagesPost(d *Daemon, r *http.Request) response.Response {
+    logger.Errorf("stgraber: images1")
     s := d.State()

     projectName := request.ProjectParam(r)
@@ -1000,6 +1001,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
             return response.Forbidden(nil)
         }
     }
+    logger.Errorf("stgraber: images2")

     instanceType, err := urlInstanceTypeDetect(r)
     if err != nil {
@@ -1029,6 +1031,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
         cleanup(builddir, nil)
         return response.InternalError(err)
     }
+    logger.Errorf("stgraber: images3")

     // Possibly set a quota on the amount of disk space this project is
     // allowed to use.
@@ -1040,6 +1043,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
     if err != nil {
         return response.SmartError(err)
     }
+    logger.Errorf("stgraber: images4")

     _, err = io.Copy(internalIO.NewQuotaWriter(post, budget), r.Body)
     if err != nil {
@@ -1047,6 +1051,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
         cleanup(builddir, post)
         return response.InternalError(err)
     }
+    logger.Errorf("stgraber: images5")

     // Is this a container request?
     _, err = post.Seek(0, io.SeekStart)
@@ -1067,6 +1072,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
         imageUpload = true
     }

+    logger.Errorf("stgraber: images6")
     if !imageUpload && req.Source.Mode == "push" {
         cleanup(builddir, post)
@@ -1081,6 +1087,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
         return createTokenResponse(s, r, projectName, req.Source.Fingerprint, metadata)
     }

+    logger.Errorf("stgraber: images7")
     if !imageUpload && !util.ValueInSlice(req.Source.Type, []string{"container", "instance", "virtual-machine", "snapshot", "image", "url"}) {
         cleanup(builddir, post)
         return response.InternalError(fmt.Errorf("Invalid images JSON"))
@@ -1108,9 +1115,11 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
             }
         }
     }
+    logger.Errorf("stgraber: images8")

     // Begin background operation
     run := func(op *operations.Operation) error {
+        logger.Errorf("stgraber: images8a")
         var err error
         var info *api.Image

@@ -1119,17 +1128,21 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
         if imageUpload {
             /* Processing image upload */
+            logger.Errorf("stgraber: images8b")
             info, err = getImgPostInfo(s, r, builddir, projectName, post, imageMetadata)
         } else {
             if req.Source.Type == "image" {
                 /* Processing image copy from remote */
+                logger.Errorf("stgraber: images8c")
                 info, err = imgPostRemoteInfo(s, r, req, op, projectName, budget)
             } else if req.Source.Type == "url" {
                 /* Processing image copy from URL */
+                logger.Errorf("stgraber: images8d")
                 info, err = imgPostURLInfo(s, r, req, op, projectName, budget)
             } else {
                 /* Processing image creation from container */
                 imagePublishLock.Lock()
+                logger.Errorf("stgraber: images8e")
                 info, err = imgPostInstanceInfo(s, r, req, op, builddir, budget)
                 imagePublishLock.Unlock()
             }
@@ -1137,6 +1150,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {

         // Set the metadata if possible, even if there is an error
         if info != nil {
+            logger.Errorf("stgraber: images8f")
             metadata := make(map[string]string)
             metadata["fingerprint"] = info.Fingerprint
             metadata["size"] = strconv.FormatInt(info.Size, 10)
@@ -1148,30 +1162,38 @@ func imagesPost(d *Daemon, r *http.Request) response.Response {
             }

             _ = op.UpdateMetadata(metadata)
+            logger.Errorf("stgraber: images8g")
         }

         if err != nil {
+            logger.Errorf("stgraber: images8h")
             return err
         }

         if isClusterNotification(r) {
             // If dealing with in-cluster image copy, don't touch the database.
+ logger.Errorf("stgraber: images8i") return nil } // Apply any provided alias + logger.Errorf("stgraber: images8j") aliases, ok := imageMetadata["aliases"] if ok { req.Aliases = aliases.([]api.ImageAlias) } + logger.Errorf("stgraber: images8k") err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { + logger.Errorf("stgraber: images8l") imgID, _, err := tx.GetImageByFingerprintPrefix(ctx, info.Fingerprint, dbCluster.ImageFilter{Project: &projectName}) if err != nil { return fmt.Errorf("Fetch image %q: %w", info.Fingerprint, err) } + logger.Errorf("stgraber: images8m") for _, alias := range req.Aliases { + logger.Errorf("stgraber: images8n") _, _, err := tx.GetImageAlias(ctx, projectName, alias.Name, true) if !response.IsNotFoundError(err) { if err != nil { @@ -1181,6 +1203,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response { return fmt.Errorf("Alias already exists: %s", alias.Name) } + logger.Errorf("stgraber: images8o") err = tx.CreateImageAlias(ctx, projectName, alias.Name, imgID, alias.Description) if err != nil { return fmt.Errorf("Add new image alias to the database: %w", err) @@ -1194,24 +1217,29 @@ func imagesPost(d *Daemon, r *http.Request) response.Response { } // Sync the images between each node in the cluster on demand + logger.Errorf("stgraber: images8p") err = imageSyncBetweenNodes(s, r, projectName, info.Fingerprint) if err != nil { return fmt.Errorf("Failed syncing image between nodes: %w", err) } // Add the image to the authorizer. + logger.Errorf("stgraber: images8q") err = s.Authorizer.AddImage(s.ShutdownCtx, projectName, info.Fingerprint) if err != nil { logger.Error("Failed to add image to authorizer", logger.Ctx{"fingerprint": info.Fingerprint, "project": projectName, "error": err}) } + logger.Errorf("stgraber: images8r") s.Events.SendLifecycle(projectName, lifecycle.ImageCreated.Event(info.Fingerprint, projectName, op.Requestor(), logger.Ctx{"type": info.Type})) + logger.Errorf("stgraber: images8s") return nil } var metadata any + logger.Errorf("stgraber: images9") if imageUpload && imageMetadata != nil { secret, _ := internalUtil.RandomHexString(32) if secret != "" { @@ -1221,11 +1249,13 @@ func imagesPost(d *Daemon, r *http.Request) response.Response { } } + logger.Errorf("stgraber: images10") op, err := operations.OperationCreate(s, projectName, operations.OperationClassTask, operationtype.ImageDownload, nil, metadata, run, nil, nil, r) if err != nil { cleanup(builddir, post) return response.InternalError(err) } + logger.Errorf("stgraber: images11") return operations.OperationResponse(op) } @@ -1783,6 +1813,7 @@ func autoUpdateImages(ctx context.Context, s *state.State) error { logger.Error("Failed to update image", logger.Ctx{"err": err, "project": image.Project, "fingerprint": image.Fingerprint}) if err == context.Canceled { + logger.Errorf("here1") return nil } } else { @@ -1796,19 +1827,28 @@ func autoUpdateImages(ctx context.Context, s *state.State) error { } } + logger.Errorf("newImage: %v", newImage) if newImage != nil { + logger.Errorf("stgraber: debug1") if len(nodes) > 1 { + logger.Errorf("stgraber: debug2") err := distributeImage(ctx, s, nodes, fingerprint, newImage) if err != nil { + logger.Errorf("stgraber: debug3") logger.Error("Failed to distribute new image", logger.Ctx{"err": err, "fingerprint": newImage.Fingerprint}) if err == context.Canceled { + logger.Errorf("stgraber: debug4") return nil } } + logger.Errorf("stgraber: debug2a") } + logger.Errorf("stgraber: debug5") + 
logger.Errorf("deleteIDs: %+v", deleteIDs) for _, ID := range deleteIDs { + logger.Errorf("stgraber: debug6") err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { // Remove the database entry for the image after distributing to cluster members. return tx.DeleteImage(ctx, ID) @@ -1818,8 +1858,11 @@ func autoUpdateImages(ctx context.Context, s *state.State) error { } } } + + logger.Errorf("stgraber: debug7") } + logger.Errorf("stgraber: debug8") return nil } @@ -1834,11 +1877,14 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin // may be different for each cluster member. var imageVolumes []string + logger.Errorf("stgraber: distrib1") err := s.DB.Node.Transaction(context.TODO(), func(ctx context.Context, tx *db.NodeTx) error { + logger.Errorf("stgraber: distrib2") config, err := node.ConfigLoad(ctx, tx) if err != nil { return err } + logger.Errorf("stgraber: distrib3") vol := config.StorageImagesVolume() if vol != "" { @@ -1846,6 +1892,7 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin var pool *api.StoragePool + logger.Errorf("stgraber: distrib3a") err = s.DB.Cluster.Transaction(ctx, func(ctx context.Context, tx *db.ClusterTx) error { _, pool, _, err = tx.GetStoragePool(ctx, fields[0]) @@ -1854,12 +1901,15 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin if err != nil { return fmt.Errorf("Failed to get storage pool info: %w", err) } + logger.Errorf("stgraber: distrib3b") // Add the volume to the list if the pool is backed by remote // storage as only then the volumes are shared. if util.ValueInSlice(pool.Driver, db.StorageRemoteDriverNames()) { imageVolumes = append(imageVolumes, vol) } + + logger.Errorf("stgraber: distrib3c") } return nil @@ -1869,6 +1919,7 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin if err != nil { logger.Error("Failed to load config", logger.Ctx{"err": err}) } + logger.Errorf("stgraber: distrib4") // Skip own node localClusterAddress := s.LocalConfig.ClusterAddress() @@ -1877,6 +1928,7 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin var poolNames []string err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { + logger.Errorf("stgraber: distrib5") // Get the IDs of all storage pools on which a storage volume // for the requested image currently exists. poolIDs, err = tx.GetPoolsWithImage(ctx, newImage.Fingerprint) @@ -1885,6 +1937,7 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin return err } + logger.Errorf("stgraber: distrib6") // Translate the IDs to poolNames. 
         poolNames, err = tx.GetPoolNamesFromIDs(ctx, poolIDs)
         if err != nil {
@@ -1898,12 +1951,14 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin
         return err
     }

+    logger.Errorf("stgraber: distrib7")
     for _, nodeAddress := range nodes {
         if nodeAddress == localClusterAddress {
             continue
         }

         var nodeInfo db.NodeInfo
+        logger.Errorf("stgraber: distrib8")
         err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
             var err error
             nodeInfo, err = tx.GetNodeByAddress(ctx, nodeAddress)
@@ -1912,11 +1967,13 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin
         if err != nil {
             return fmt.Errorf("Failed to retrieve information about cluster member with address %q: %w", nodeAddress, err)
         }
+        logger.Errorf("stgraber: distrib9")

         client, err := cluster.Connect(nodeAddress, s.Endpoints.NetworkCert(), s.ServerCert(), nil, true)
         if err != nil {
             return fmt.Errorf("Failed to connect to %q for image synchronization: %w", nodeAddress, err)
         }
+        logger.Errorf("stgraber: distrib10")

         client = client.UseTarget(nodeInfo.Name)

@@ -1924,6 +1981,7 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin
         if err != nil {
             logger.Error("Failed to retrieve information about cluster member", logger.Ctx{"err": err, "remote": nodeAddress})
         } else {
+            logger.Errorf("stgraber: distrib11")
             vol := resp.Config["storage.images_volume"]
             skipDistribution := false

@@ -1945,6 +2003,7 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin

                 fields := strings.Split(vol, "/")

+                logger.Errorf("stgraber: distrib12")
                 pool, _, err := client.GetStoragePool(fields[0])
                 if err != nil {
                     logger.Error("Failed to get storage pool info", logger.Ctx{"err": err, "pool": fields[0]})
@@ -1953,9 +2012,11 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin
                     imageVolumes = append(imageVolumes, vol)
                 }
             }
+            logger.Errorf("stgraber: distrib13")
         }
     }

+    logger.Errorf("stgraber: distrib14")
     createArgs := &incus.ImageCreateArgs{}
     imageMetaPath := internalUtil.VarPath("images", newImage.Fingerprint)
     imageRootfsPath := internalUtil.VarPath("images", newImage.Fingerprint+".rootfs")
@@ -1986,10 +2047,12 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin
         image := api.ImagesPost{}
         image.Filename = createArgs.MetaName

+        logger.Errorf("stgraber: distrib15")
         op, err := client.CreateImage(image, createArgs)
         if err != nil {
             return err
         }
+        logger.Errorf("stgraber: distrib16")

         select {
         case <-ctx.Done():
@@ -1997,11 +2060,13 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin
             return ctx.Err()
         default:
         }
+        logger.Errorf("stgraber: distrib16b")

         err = op.Wait()
         if err != nil {
             return err
         }
+        logger.Errorf("stgraber: distrib17")

         for _, poolName := range poolNames {
             if poolName == "" {
@@ -2013,17 +2078,22 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin
                 Pool: poolName,
             }

+            logger.Errorf("stgraber: distrib18")
             _, _, err = client.RawQuery("POST", "/internal/image-optimize", req, "")
             if err != nil {
                 logger.Error("Failed creating new image in storage pool", logger.Ctx{"err": err, "remote": nodeAddress, "pool": poolName, "fingerprint": newImage.Fingerprint})
             }

+            logger.Errorf("stgraber: distrib19")
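+            // Debug: the old image volume is removed from this pool just below.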
             err = client.DeleteStoragePoolVolume(poolName, "image", oldFingerprint)
             if err != nil {
                 logger.Error("Failed deleting old image from storage pool", logger.Ctx{"err": err, "remote": nodeAddress, "pool": poolName, "fingerprint": oldFingerprint})
             }
+            logger.Errorf("stgraber: distrib20")
         }
+        logger.Errorf("stgraber: distrib21")
     }
+    logger.Errorf("stgraber: distrib22")

     return nil
 }
diff --git a/test/main.sh b/test/main.sh
index b159b3b7b0e..aace6387bd8 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -24,6 +24,8 @@ if [ -z "${INCUS_BACKEND:-}" ]; then
   INCUS_BACKEND="dir"
 fi

+INCUS_BACKEND="dir"
+
 # shellcheck disable=SC2034
 INCUS_NETNS=""

@@ -194,174 +196,7 @@ if [ "$#" -gt 0 ] && [ "$1" != "all" ] && [ "$1" != "cluster" ] && [ "$1" != "st
   exit
 fi

-if [ "${1:-"all"}" != "cluster" ]; then
-  run_test test_check_deps "checking dependencies"
-  run_test test_database_restore "database restore"
-  run_test test_database_no_disk_space "database out of disk space"
-  run_test test_sql "SQL"
-  run_test test_tls_restrictions "TLS restrictions"
-  run_test test_oidc "OpenID Connect"
-  run_test test_openfga "OpenFGA"
-  run_test test_certificate_edit "Certificate edit"
-  run_test test_basic_usage "basic usage"
-  run_test test_remote_url "remote url handling"
-  run_test test_remote_admin "remote administration"
-  run_test test_remote_usage "remote usage"
-fi
-
-if [ "${1:-"all"}" != "standalone" ]; then
-  run_test test_clustering_enable "clustering enable"
-  run_test test_clustering_membership "clustering membership"
-  run_test test_clustering_containers "clustering containers"
-  run_test test_clustering_storage "clustering storage"
-  run_test test_clustering_storage_single_node "clustering storage single node"
-  run_test test_clustering_network "clustering network"
-  run_test test_clustering_publish "clustering publish"
-  run_test test_clustering_profiles "clustering profiles"
-  run_test test_clustering_join_api "clustering join api"
-  run_test test_clustering_shutdown_nodes "clustering shutdown"
-  run_test test_clustering_projects "clustering projects"
-  run_test test_clustering_update_cert "clustering update cert"
-  run_test test_clustering_update_cert_reversion "clustering update cert reversion"
-  run_test test_clustering_address "clustering address"
-  run_test test_clustering_image_replication "clustering image replication"
-  run_test test_clustering_recover "clustering recovery"
-  run_test test_clustering_handover "clustering handover"
-  run_test test_clustering_rebalance "clustering rebalance"
-  run_test test_clustering_remove_raft_node "clustering remove raft node"
-  run_test test_clustering_failure_domains "clustering failure domains"
-  run_test test_clustering_image_refresh "clustering image refresh"
-  run_test test_clustering_evacuation "clustering evacuation"
-  run_test test_clustering_instance_placement_scriptlet "clustering instance placement scriptlet"
-  run_test test_clustering_move "clustering move"
-  run_test test_clustering_edit_configuration "clustering config edit"
-  run_test test_clustering_remove_members "clustering config remove members"
-  run_test test_clustering_autotarget "clustering autotarget member"
-  # run_test test_clustering_upgrade "clustering upgrade"
-  run_test test_clustering_groups "clustering groups"
-  run_test test_clustering_events "clustering events"
-  run_test test_clustering_uuid "clustering uuid"
-  run_test test_clustering_openfga "clustering OpenFGA"
-fi
-
-if [ "${1:-"all"}" != "cluster" ]; then
-  run_test test_projects_default "default project"
-  run_test test_projects_crud "projects CRUD operations"
-  run_test test_projects_containers "containers inside projects"
-  run_test test_projects_snapshots "snapshots inside projects"
-  run_test test_projects_backups "backups inside projects"
-  run_test test_projects_profiles "profiles inside projects"
-  run_test test_projects_profiles_default "profiles from the global default project"
-  run_test test_projects_images "images inside projects"
-  run_test test_projects_images_default "images from the global default project"
-  run_test test_projects_storage "projects and storage pools"
-  run_test test_projects_network "projects and networks"
-  run_test test_projects_limits "projects limits"
-  run_test test_projects_usage "projects usage"
-  run_test test_projects_restrictions "projects restrictions"
-  run_test test_container_devices_disk "container devices - disk"
-  run_test test_container_devices_disk_restricted "container devices - disk - restricted"
-  run_test test_container_devices_nic_p2p "container devices - nic - p2p"
-  run_test test_container_devices_nic_bridged "container devices - nic - bridged"
-  run_test test_container_devices_nic_bridged_acl "container devices - nic - bridged - acl"
-  run_test test_container_devices_nic_bridged_filtering "container devices - nic - bridged - filtering"
-  run_test test_container_devices_nic_bridged_vlan "container devices - nic - bridged - vlan"
-  run_test test_container_devices_nic_physical "container devices - nic - physical"
-  run_test test_container_devices_nic_macvlan "container devices - nic - macvlan"
-  run_test test_container_devices_nic_ipvlan "container devices - nic - ipvlan"
-  run_test test_container_devices_nic_sriov "container devices - nic - sriov"
-  run_test test_container_devices_nic_routed "container devices - nic - routed"
-  run_test test_container_devices_infiniband_physical "container devices - infiniband - physical"
-  run_test test_container_devices_infiniband_sriov "container devices - infiniband - sriov"
-  run_test test_container_devices_proxy "container devices - proxy"
-  run_test test_container_devices_gpu "container devices - gpu"
-  run_test test_container_devices_unix_char "container devices - unix-char"
-  run_test test_container_devices_unix_block "container devices - unix-block"
-  run_test test_container_devices_tpm "container devices - tpm"
-  run_test test_container_move "container server-side move"
-  run_test test_container_syscall_interception "container syscall interception"
-  run_test test_security "security features"
-  run_test test_security_protection "container protection"
-  run_test test_image_expiry "image expiry"
-  run_test test_image_list_all_aliases "image list all aliases"
-  run_test test_image_auto_update "image auto-update"
-  run_test test_image_prefer_cached "image prefer cached"
-  run_test test_image_import_dir "import image from directory"
-  run_test test_image_refresh "image refresh"
-  run_test test_image_acl "image acl"
-  run_test test_cloud_init "cloud-init"
-  run_test test_exec "exec"
-  run_test test_concurrent_exec "concurrent exec"
-  run_test test_concurrent "concurrent startup"
-  run_test test_snapshots "container snapshots"
-  run_test test_snap_restore "snapshot restores"
-  run_test test_snap_expiry "snapshot expiry"
-  run_test test_snap_schedule "snapshot scheduling"
-  run_test test_snap_volume_db_recovery "snapshot volume database record recovery"
-  run_test test_config_profiles "profiles and configuration"
-  run_test test_config_edit "container configuration edit"
-  run_test test_property "container property"
-  run_test test_config_edit_container_snapshot_pool_config "container and snapshot volume configuration edit"
-  run_test test_container_metadata "manage container metadata and templates"
-  run_test test_container_snapshot_config "container snapshot configuration"
-  run_test test_server_config "server configuration"
-  run_test test_filemanip "file manipulations"
-  run_test test_network "network management"
-  run_test test_network_acl "network ACL management"
-  run_test test_network_forward "network address forwards"
-  run_test test_network_zone "network DNS zones"
-  run_test test_idmap "id mapping"
-  run_test test_template "file templating"
-  run_test test_pki "PKI mode"
-  run_test test_dev_incus "/dev/incus"
-  run_test test_fuidshift "fuidshift"
-  run_test test_migration "migration"
-  run_test test_lxc_to_incus "LXC to Incus"
-  run_test test_fdleak "fd leak"
-  run_test test_storage "storage"
-  run_test test_storage_volume_snapshots "storage volume snapshots"
-  run_test test_init_auto "incus admin init auto"
-  run_test test_init_interactive "incus admin init interactive"
-  run_test test_init_preseed "incus admin init preseed"
-  run_test test_storage_profiles "storage profiles"
-  run_test test_container_recover "container recover"
-  run_test test_bucket_recover "bucket recover"
-  run_test test_get_operations "test_get_operations"
-  run_test test_storage_volume_attach "attaching storage volumes"
-  run_test test_storage_driver_btrfs "btrfs storage driver"
-  run_test test_storage_driver_ceph "ceph storage driver"
-  run_test test_storage_driver_cephfs "cephfs storage driver"
-  run_test test_storage_driver_zfs "zfs storage driver"
-  run_test test_storage_buckets "storage buckets"
-  run_test test_storage_volume_import "storage volume import"
-  run_test test_storage_volume_initial_config "storage volume initial configuration"
-  run_test test_resources "resources"
-  run_test test_kernel_limits "kernel limits"
-  run_test test_console "console"
-  run_test test_query "query"
-  run_test test_storage_local_volume_handling "storage local volume handling"
-  run_test test_backup_import "backup import"
-  run_test test_backup_export "backup export"
-  run_test test_backup_rename "backup rename"
-  run_test test_backup_volume_export "backup volume export"
-  run_test test_backup_export_import_instance_only "backup export and import instance only"
-  run_test test_backup_volume_rename_delete "backup volume rename and delete"
-  run_test test_backup_different_instance_uuid "backup instance and check instance UUIDs"
-  run_test test_backup_volume_expiry "backup volume expiry"
-  run_test test_backup_export_import_recover "backup export, import, and recovery"
-  run_test test_container_local_cross_pool_handling "container local cross pool handling"
-  run_test test_incremental_copy "incremental container copy"
-  run_test test_profiles_project_default "profiles in default project"
-  run_test test_profiles_project_images_profiles "profiles in project with images and profiles enabled"
-  run_test test_profiles_project_images "profiles in project with images enabled and profiles disabled"
-  run_test test_profiles_project_profiles "profiles in project with images disabled and profiles enabled"
-  run_test test_filtering "API filtering"
-  run_test test_warnings "Warnings"
-  run_test test_metrics "Metrics"
-  run_test test_storage_volume_recover "Recover storage volumes"
-  run_test test_syslog_socket "Syslog socket"
-  run_test test_incus_user "incus-user"
-fi
+run_test test_clustering_image_refresh "clustering image refresh"

 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 1c66016475f..885ddcdb7d4 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -2762,6 +2762,13 @@ test_clustering_image_refresh() {
   # while project foo should still have the old image.
   # Also, it should only show 1 entry for the old image and 2 entries
   # for the new one.
+
+  echo "DEBUG: first"
+  sleep 5
+  echo "old: ${old_fingerprint}"
+  echo "new: ${new_fingerprint}"
+  INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global 'select projects.name, images.fingerprint from images join projects on images.project_id=projects.id'
+
   echo 'select images.fingerprint from images join projects on images.project_id=projects.id where projects.name="foo"' | INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global - | grep "${old_fingerprint}"

   [ "$(INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global 'select images.fingerprint from images' | grep -c "${old_fingerprint}")" -eq 1 ] || false
@@ -2784,6 +2791,7 @@ test_clustering_image_refresh() {
     wait "${pid}" || true
   done

+  echo "DEBUG: second"
   echo 'select images.fingerprint from images join projects on images.project_id=projects.id where projects.name="foo"' | INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global - | grep "${old_fingerprint}"

   [ "$(INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global 'select images.fingerprint from images' | grep -c "${old_fingerprint}")" -eq 1 ] || false
@@ -2812,6 +2820,7 @@ test_clustering_image_refresh() {

   pids=""

+  echo "DEBUG: third"
   echo 'select images.fingerprint from images join projects on images.project_id=projects.id where projects.name="foo"' | INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global - | grep "${old_fingerprint}"

   [ "$(INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global 'select images.fingerprint from images' | grep -c "${old_fingerprint}")" -eq 1 ] || false