From 47b1b698bf871d1ed1f81ac14e7aec24c7755686 Mon Sep 17 00:00:00 2001
From: Heather Lanigan
Date: Thu, 9 May 2024 11:06:59 -0400
Subject: [PATCH 01/12] Combine two HardwareCharacteristics instances into one.

CreateContainer was creating two structures, hc and hardware, of the same
type. This caused the availability zone data to be lost when the second
structure was returned. It was not caught because one structure was defined
in the method signature as a return value.

---
 container/kvm/kvm.go | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/container/kvm/kvm.go b/container/kvm/kvm.go
index 8435ec42613..5325854ebfd 100644
--- a/container/kvm/kvm.go
+++ b/container/kvm/kvm.go
@@ -162,7 +162,7 @@ func (manager *containerManager) CreateContainer(
 	networkConfig *container.NetworkConfig,
 	storageConfig *container.StorageConfig,
 	callback environs.StatusCallbackFunc,
-) (_ instances.Instance, hc *instance.HardwareCharacteristics, err error) {
+) (_ instances.Instance, _ *instance.HardwareCharacteristics, err error) {
 	name, err := manager.namespace.Hostname(instanceConfig.MachineId)
 	if err != nil {
@@ -183,8 +183,6 @@ func (manager *containerManager) CreateContainer(
 	// disk.
 	kvmContainer := KVMObjectFactory.New(name)

-	hc = &instance.HardwareCharacteristics{AvailabilityZone: &manager.availabilityZone}
-
 	// Create the cloud-init.
 	cloudConfig, err := cloudinit.New(instanceConfig.Base.OS)
 	if err != nil {
@@ -237,10 +235,9 @@ func (manager *containerManager) CreateContainer(
 	}
 	startParams.ImageDownloadURL = imURL

-	var hardware instance.HardwareCharacteristics
-	hardware, err = instance.ParseHardware(
-		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cores=%v",
-			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
+	hardware, err := instance.ParseHardware(
+		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cores=%v availability-zone=%s",
+			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores, manager.availabilityZone))
 	if err != nil {
 		return nil, nil, errors.Annotate(err, "failed to parse hardware")
 	}

From 99969f2dda3b9a7ae62a4947272e162cb2ef0027 Mon Sep 17 00:00:00 2001
From: Heather Lanigan
Date: Thu, 9 May 2024 13:57:00 -0400
Subject: [PATCH 02/12] Avoid panic if no machine architecture.

Improve the logic around deducing the platform from placement directives.
Only look at machine-scoped directives; machines for the other directive
types have yet to be created. Machine-scoped placements must have hardware
characteristics to deploy, otherwise platform validation/creation is
impossible.
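The panic in question comes from dereferencing a HardwareCharacteristics.Arch
pointer that was never set. Below is a minimal, dependency-free sketch of the
guard this patch introduces; the struct and function names are illustrative
stand-ins, not the Juju API.

    package main

    import (
    	"errors"
    	"fmt"
    )

    type hardwareCharacteristics struct {
    	Arch *string // nil when the machine never recorded an architecture
    }

    // placementArch mirrors the added validation: refuse to deduce a platform
    // from a machine with no saved architecture instead of dereferencing nil.
    func placementArch(machineID string, hc *hardwareCharacteristics) (string, error) {
    	if hc == nil || hc.Arch == nil {
    		return "", errors.New("machine " + machineID + " has no saved architecture")
    	}
    	return *hc.Arch, nil
    }

    func main() {
    	amd64 := "amd64"
    	fmt.Println(placementArch("0", &hardwareCharacteristics{Arch: &amd64}))
    	fmt.Println(placementArch("5/lxd/6", &hardwareCharacteristics{})) // previously a panic
    }

The real change returns a user-facing error naming the machine, so the deploy
fails with a clear message instead of panicking.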
--- .../facades/client/application/backend.go | 1 + .../client/application/deployrepository.go | 131 ++++++++++++------ .../deployrepository_mocks_test.go | 14 ++ .../application/deployrepository_test.go | 107 ++++++++------ 4 files changed, 165 insertions(+), 88 deletions(-) diff --git a/apiserver/facades/client/application/backend.go b/apiserver/facades/client/application/backend.go index ab07de3ddb7..a3b9c0ca854 100644 --- a/apiserver/facades/client/application/backend.go +++ b/apiserver/facades/client/application/backend.go @@ -148,6 +148,7 @@ type CharmMeta interface { type Machine interface { Base() state.Base HardwareCharacteristics() (*instance.HardwareCharacteristics, error) + Id() string PublicAddress() (network.SpaceAddress, error) IsLockedForSeriesUpgrade() (bool, error) IsParentLockedForSeriesUpgrade() (bool, error) diff --git a/apiserver/facades/client/application/deployrepository.go b/apiserver/facades/client/application/deployrepository.go index b4881c9e371..161d9d1ede7 100644 --- a/apiserver/facades/client/application/deployrepository.go +++ b/apiserver/facades/client/application/deployrepository.go @@ -581,19 +581,21 @@ func (v *deployFromRepositoryValidator) createOrigin(arg params.DeployFromReposi // deducePlatform returns a platform for initial resolveCharm call. // At minimum, it must contain an architecture. -// Platform is determined by the args: architecture constraint and -// provided base. -// - Check placement to determine known machine platform. If diffs from -// other provided data return error. +// Platform is determined by the args: architecture constraint and provided +// base. Or from the model default architecture and base. // - If no base provided, use model default base. // - If no model default base, will be determined later. // - If no architecture provided, use model default. Fallback // to DefaultArchitecture. +// +// Then check for the platform of any machine scoped placement directives. +// Use that for the platform if no base provided by the user. +// Return an error if the placement platform and user provided base do not +// match. func (v *deployFromRepositoryValidator) deducePlatform(arg params.DeployFromRepositoryArg) (corecharm.Platform, bool, error) { argArch := arg.Cons.Arch argBase := arg.Base var usedModelDefaultBase bool - var usedModelDefaultArch bool // Try argBase with provided argArch and argBase first. platform := corecharm.Platform{} @@ -610,7 +612,6 @@ func (v *deployFromRepositoryValidator) deducePlatform(arg params.DeployFromRepo platform.Architecture = *mConst.Arch } else { platform.Architecture = arch.DefaultArchitecture - usedModelDefaultArch = true } } if argBase != nil { @@ -619,9 +620,7 @@ func (v *deployFromRepositoryValidator) deducePlatform(arg params.DeployFromRepo return corecharm.Platform{}, usedModelDefaultBase, err } platform.OS = base.OS - // platform channels don't model the concept of a risk - // so ensure that only the track is included - platform.Channel = base.Channel.Track + platform.Channel = base.Channel.String() } // Initial validation of platform from known data. 
@@ -630,78 +629,120 @@ func (v *deployFromRepositoryValidator) deducePlatform(arg params.DeployFromRepo return corecharm.Platform{}, usedModelDefaultBase, err } - // Match against platforms from placement placementPlatform, placementsMatch, err := v.platformFromPlacement(arg.Placement) - if err != nil && !errors.Is(err, errors.NotFound) { + if err != nil { return corecharm.Platform{}, usedModelDefaultBase, err } - if err == nil && !placementsMatch { - return corecharm.Platform{}, usedModelDefaultBase, errors.BadRequestf("bases of existing placement machines do not match") + // No machine scoped placement to match, return after checking + // if using default model base. + if placementPlatform == nil { + return v.modelDefaultBase(platform) + } + // There can be only 1 platform. + if !placementsMatch { + return corecharm.Platform{}, usedModelDefaultBase, errors.BadRequestf("bases of existing placement machines do not match each other") } - // No platform args, and one platform from placement, use that. - if placementsMatch && usedModelDefaultArch && argBase == nil { - return placementPlatform, usedModelDefaultBase, nil + // No base args provided. Use the placement platform to deploy. + if argBase == nil { + deployRepoLogger.Tracef("using placement platform %q to deploy", placementPlatform.String()) + return *placementPlatform, usedModelDefaultBase, nil } - if platform.Channel == "" { - mCfg, err := v.model.Config() - if err != nil { - return corecharm.Platform{}, usedModelDefaultBase, err - } - if db, ok := mCfg.DefaultBase(); ok { - defaultBase, err := corebase.ParseBaseFromString(db) - if err != nil { - return corecharm.Platform{}, usedModelDefaultBase, err - } - platform.OS = defaultBase.OS - // platform channels don't model the concept of a risk - // so ensure that only the track is included - platform.Channel = defaultBase.Channel.Track - usedModelDefaultBase = true - } + + // Check that the placement platform and the derived platform match + // when a base is supplied. There is no guarantee that all placement + // directives are machine scoped. + if placementPlatform.String() == platform.String() { + return *placementPlatform, usedModelDefaultBase, nil } - return platform, usedModelDefaultBase, nil + var msg string + if usedModelDefaultBase { + msg = fmt.Sprintf("base from placements, %q, does not match model default base %q", placementPlatform.String(), platform.String()) + } else { + msg = fmt.Sprintf("base from placements, %q, does not match requested base %q", placementPlatform.String(), platform.String()) + } + return corecharm.Platform{}, usedModelDefaultBase, fmt.Errorf(msg) + } -func (v *deployFromRepositoryValidator) platformFromPlacement(placements []*instance.Placement) (corecharm.Platform, bool, error) { +func (v *deployFromRepositoryValidator) modelDefaultBase(p corecharm.Platform) (corecharm.Platform, bool, error) { + // No provided platform channel, check model defaults. + if p.Channel != "" { + return p, false, nil + } + mCfg, err := v.model.Config() + if err != nil { + return p, false, nil + } + db, ok := mCfg.DefaultBase() + if !ok { + return p, false, nil + } + defaultBase, err := corebase.ParseBaseFromString(db) + if err != nil { + return corecharm.Platform{}, false, err + } + p.OS = defaultBase.OS + p.Channel = defaultBase.Channel.String() + return p, true, nil +} + +// platformFromPlacement attempts to choose a platform to deploy with based on the +// machine scoped placement values provided by the user. The platform for all provided +// machines much match. 
+func (v *deployFromRepositoryValidator) platformFromPlacement(placements []*instance.Placement) (*corecharm.Platform, bool, error) { if len(placements) == 0 { - return corecharm.Platform{}, false, errors.NotFoundf("placements") + return nil, false, nil } + machines := make([]Machine, 0) + var machineScopeCnt int // Find which machines in placement actually exist. for _, placement := range placements { - m, err := v.state.Machine(placement.Directive) - if errors.Is(err, errors.NotFound) { + if placement.Scope != instance.MachineScope { continue } + machineScopeCnt += 1 + m, err := v.state.Machine(placement.Directive) if err != nil { - return corecharm.Platform{}, false, err + return nil, false, errors.Annotate(err, "verifying machine for placement") } machines = append(machines, m) } - if len(machines) == 0 { - return corecharm.Platform{}, false, errors.NotFoundf("machines in placements") + + if machineScopeCnt == 0 { + // Not all placements refer to actual machines, no need to continue. + deployRepoLogger.Tracef("no machine scoped directives found in placements") + return nil, false, nil } // Gather platforms for existing machines var platform corecharm.Platform + // Use a set to determine if all the machines have the same platform. platStrings := set.NewStrings() for _, machine := range machines { b := machine.Base() - a, err := machine.HardwareCharacteristics() + hc, err := machine.HardwareCharacteristics() if err != nil { - return corecharm.Platform{}, false, err + if errors.Is(err, errors.NotFound) { + return nil, false, fmt.Errorf("machine %q not started, please retry when started", machine.Id()) + } + return nil, false, err + } + mArch := hc.Arch + if mArch == nil { + return nil, false, fmt.Errorf("machine %q has no saved architecture", machine.Id()) } - platString := fmt.Sprintf("%s/%s/%s", *a.Arch, b.OS, b.Channel) + platString := fmt.Sprintf("%s/%s/%s", *mArch, b.OS, b.Channel) p, err := corecharm.ParsePlatformNormalize(platString) if err != nil { - return corecharm.Platform{}, false, err + return nil, false, err } platform = p platStrings.Add(p.String()) } - return platform, platStrings.Size() == 1, nil + return &platform, platStrings.Size() == 1, nil } func (v *deployFromRepositoryValidator) resolveCharm(curl *charm.URL, requestedOrigin corecharm.Origin, force, usedModelDefaultBase bool, cons constraints.Value) (corecharm.ResolvedDataForDeploy, error) { diff --git a/apiserver/facades/client/application/deployrepository_mocks_test.go b/apiserver/facades/client/application/deployrepository_mocks_test.go index f012f65b3c1..bc64eab001e 100644 --- a/apiserver/facades/client/application/deployrepository_mocks_test.go +++ b/apiserver/facades/client/application/deployrepository_mocks_test.go @@ -640,6 +640,20 @@ func (mr *MockMachineMockRecorder) HardwareCharacteristics() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HardwareCharacteristics", reflect.TypeOf((*MockMachine)(nil).HardwareCharacteristics)) } +// Id mocks base method. +func (m *MockMachine) Id() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Id") + ret0, _ := ret[0].(string) + return ret0 +} + +// Id indicates an expected call of Id. +func (mr *MockMachineMockRecorder) Id() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Id", reflect.TypeOf((*MockMachine)(nil).Id)) +} + // IsLockedForSeriesUpgrade mocks base method. 
func (m *MockMachine) IsLockedForSeriesUpgrade() (bool, error) { m.ctrl.T.Helper() diff --git a/apiserver/facades/client/application/deployrepository_test.go b/apiserver/facades/client/application/deployrepository_test.go index 546c2e36d94..d7ee0b49f72 100644 --- a/apiserver/facades/client/application/deployrepository_test.go +++ b/apiserver/facades/client/application/deployrepository_test.go @@ -647,27 +647,27 @@ func (s *validatorSuite) TestDeducePlatformSimple(c *gc.C) { c.Assert(plat, gc.DeepEquals, corecharm.Platform{Architecture: "amd64"}) } -func (s *validatorSuite) TestDeducePlatformRiskInChannel(c *gc.C) { - defer s.setupMocks(c).Finish() - //model constraint default - s.state.EXPECT().ModelConstraints().Return(constraints.Value{Arch: strptr("amd64")}, nil) - - arg := params.DeployFromRepositoryArg{ - CharmName: "testme", - Base: ¶ms.Base{ - Name: "ubuntu", - Channel: "22.10/stable", - }, - } - plat, usedModelDefaultBase, err := s.getValidator().deducePlatform(arg) - c.Assert(err, gc.IsNil) - c.Assert(usedModelDefaultBase, jc.IsFalse) - c.Assert(plat, gc.DeepEquals, corecharm.Platform{ - Architecture: "amd64", - OS: "ubuntu", - Channel: "22.10", - }) -} +//func (s *validatorSuite) TestDeducePlatformRiskInChannel(c *gc.C) { +// defer s.setupMocks(c).Finish() +// //model constraint default +// s.state.EXPECT().ModelConstraints().Return(constraints.Value{Arch: strptr("amd64")}, nil) +// +// arg := params.DeployFromRepositoryArg{ +// CharmName: "testme", +// Base: ¶ms.Base{ +// Name: "ubuntu", +// Channel: "22.10/stable", +// }, +// } +// plat, usedModelDefaultBase, err := s.getValidator().deducePlatform(arg) +// c.Assert(err, gc.IsNil) +// c.Assert(usedModelDefaultBase, jc.IsFalse) +// c.Assert(plat, gc.DeepEquals, corecharm.Platform{ +// Architecture: "amd64", +// OS: "ubuntu", +// Channel: "22.10", +// }) +//} func (s *validatorSuite) TestDeducePlatformArgArchBase(c *gc.C) { defer s.setupMocks(c).Finish() @@ -686,7 +686,7 @@ func (s *validatorSuite) TestDeducePlatformArgArchBase(c *gc.C) { c.Assert(plat, gc.DeepEquals, corecharm.Platform{ Architecture: "arm64", OS: "ubuntu", - Channel: "22.10", + Channel: "22.10/stable", }) } @@ -711,7 +711,7 @@ func (s *validatorSuite) TestDeducePlatformModelDefaultBase(c *gc.C) { c.Assert(plat, gc.DeepEquals, corecharm.Platform{ Architecture: "amd64", OS: "ubuntu", - Channel: "22.04", + Channel: "22.04/stable", }) } @@ -721,16 +721,17 @@ func (s *validatorSuite) TestDeducePlatformPlacementSimpleFound(c *gc.C) { s.state.EXPECT().Machine("0").Return(s.machine, nil) s.machine.EXPECT().Base().Return(state.Base{ OS: "ubuntu", - Channel: "18.04", + Channel: "22.04", }) hwc := &instance.HardwareCharacteristics{Arch: strptr("arm64")} s.machine.EXPECT().HardwareCharacteristics().Return(hwc, nil) arg := params.DeployFromRepositoryArg{ CharmName: "testme", - Placement: []*instance.Placement{{ - Directive: "0", - }}, + Placement: []*instance.Placement{ + {Scope: instance.MachineScope, Directive: "0"}, + {Scope: "lxd"}, + }, } plat, usedModelDefaultBase, err := s.getValidator().deducePlatform(arg) c.Assert(err, gc.IsNil) @@ -738,27 +739,47 @@ func (s *validatorSuite) TestDeducePlatformPlacementSimpleFound(c *gc.C) { c.Assert(plat, gc.DeepEquals, corecharm.Platform{ Architecture: "arm64", OS: "ubuntu", - Channel: "18.04", + Channel: "22.04", }) } +func (s *validatorSuite) TestDeducePlatformPlacementNoPanic(c *gc.C) { + defer s.setupMocks(c).Finish() + s.state.EXPECT().ModelConstraints().Return(constraints.Value{}, nil) + 
s.machine.EXPECT().Id().Return("5/lxd/6") + s.state.EXPECT().Machine("5/lxd/6").Return(s.machine, nil) + s.machine.EXPECT().Base().Return(state.Base{ + OS: "ubuntu", + Channel: "22.04", + }) + hwc := &instance.HardwareCharacteristics{} + s.machine.EXPECT().HardwareCharacteristics().Return(hwc, nil) + + arg := params.DeployFromRepositoryArg{ + CharmName: "testme", + Placement: []*instance.Placement{ + {Scope: instance.MachineScope, Directive: "5/lxd/6"}, + {Scope: "lxd"}, + }, + } + _, _, err := s.getValidator().deducePlatform(arg) + c.Assert(err, gc.NotNil) +} + func (s *validatorSuite) TestDeducePlatformPlacementSimpleNotFound(c *gc.C) { defer s.setupMocks(c).Finish() //model constraint default s.state.EXPECT().ModelConstraints().Return(constraints.Value{Arch: strptr("amd64")}, nil) - s.model.EXPECT().Config().Return(config.New(config.UseDefaults, coretesting.FakeConfig())) - s.state.EXPECT().Machine("0/lxd/0").Return(nil, errors.NotFoundf("machine 0/lxd/0 not found")) + s.state.EXPECT().Machine("0/lxd/0").Return(nil, errors.NotFoundf("machine 0/lxd/0")) arg := params.DeployFromRepositoryArg{ CharmName: "testme", Placement: []*instance.Placement{{ - Directive: "0/lxd/0", + Scope: instance.MachineScope, Directive: "0/lxd/0", }}, } - plat, usedModelDefaultBase, err := s.getValidator().deducePlatform(arg) - c.Assert(err, gc.IsNil) - c.Assert(usedModelDefaultBase, jc.IsFalse) - c.Assert(plat, gc.DeepEquals, corecharm.Platform{Architecture: "amd64"}) + _, _, err := s.getValidator().deducePlatform(arg) + c.Assert(err, jc.ErrorIs, errors.NotFound) } func (s *validatorSuite) TestResolvedCharmValidationSubordinate(c *gc.C) { @@ -785,7 +806,7 @@ func (s *validatorSuite) TestDeducePlatformPlacementMutipleMatch(c *gc.C) { s.state.EXPECT().Machine(gomock.Any()).Return(s.machine, nil).Times(3) s.machine.EXPECT().Base().Return(state.Base{ OS: "ubuntu", - Channel: "18.04", + Channel: "22.04", }).Times(3) hwc := &instance.HardwareCharacteristics{Arch: strptr("arm64")} s.machine.EXPECT().HardwareCharacteristics().Return(hwc, nil).Times(3) @@ -793,9 +814,9 @@ func (s *validatorSuite) TestDeducePlatformPlacementMutipleMatch(c *gc.C) { arg := params.DeployFromRepositoryArg{ CharmName: "testme", Placement: []*instance.Placement{ - {Directive: "0"}, - {Directive: "1"}, - {Directive: "3"}, + {Scope: instance.MachineScope, Directive: "0"}, + {Scope: instance.MachineScope, Directive: "1"}, + {Scope: instance.MachineScope, Directive: "3"}, }, } plat, usedModelDefaultBase, err := s.getValidator().deducePlatform(arg) @@ -804,7 +825,7 @@ func (s *validatorSuite) TestDeducePlatformPlacementMutipleMatch(c *gc.C) { c.Assert(plat, gc.DeepEquals, corecharm.Platform{ Architecture: "arm64", OS: "ubuntu", - Channel: "18.04", + Channel: "22.04", }) } @@ -815,7 +836,7 @@ func (s *validatorSuite) TestDeducePlatformPlacementMutipleMatchFail(c *gc.C) { s.machine.EXPECT().Base().Return( state.Base{ OS: "ubuntu", - Channel: "18.04", + Channel: "22.04", }).AnyTimes() gomock.InOrder( s.machine.EXPECT().HardwareCharacteristics().Return( @@ -829,8 +850,8 @@ func (s *validatorSuite) TestDeducePlatformPlacementMutipleMatchFail(c *gc.C) { arg := params.DeployFromRepositoryArg{ CharmName: "testme", Placement: []*instance.Placement{ - {Directive: "0"}, - {Directive: "1"}, + {Scope: instance.MachineScope, Directive: "0"}, + {Scope: instance.MachineScope, Directive: "1"}, }, } _, _, err := s.getValidator().deducePlatform(arg) From 8930477ac9e592bdc82012f22f482638b9970ae9 Mon Sep 17 00:00:00 2001 From: Heather Lanigan Date: Thu, 9 May 2024 
14:18:37 -0400 Subject: [PATCH 03/12] Ensure that CreateContainer returns architecture. Architecture from CreateContainer results become the instance data. Instance data is required to determine the platform, architecture/os/channel, of a machine when deploying with placement directives to an existing machine. --- container/lxd/manager.go | 2 ++ container/lxd/manager_test.go | 18 +++++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/container/lxd/manager.go b/container/lxd/manager.go index 560757d8e0a..3ddb128d709 100644 --- a/container/lxd/manager.go +++ b/container/lxd/manager.go @@ -141,7 +141,9 @@ func (m *containerManager) CreateContainer( _ = callback(status.Running, "Container started", nil) virtType := string(spec.VirtType) + arch := c.Arch() hardware := &instance.HardwareCharacteristics{ + Arch: &arch, AvailabilityZone: &m.availabilityZone, VirtType: &virtType, } diff --git a/container/lxd/manager_test.go b/container/lxd/manager_test.go index c44d6ceee5b..d082aa26d86 100644 --- a/container/lxd/manager_test.go +++ b/container/lxd/manager_test.go @@ -141,6 +141,7 @@ func (s *managerSuite) TestContainerCreateDestroy(c *gc.C) { exp.GetInstanceState(hostName).Return( &lxdapi.InstanceState{ + StatusCode: lxdapi.Running, Network: map[string]lxdapi.InstanceStateNetwork{ "fan0": { @@ -152,8 +153,12 @@ func (s *managerSuite) TestContainerCreateDestroy(c *gc.C) { }, }, }, lxdtesting.ETag, nil).Times(2) - - exp.GetInstance(hostName).Return(&lxdapi.Instance{Name: hostName, Type: "container"}, lxdtesting.ETag, nil) + inst := &lxdapi.Instance{ + Name: hostName, + Type: "container", + InstancePut: lxdapi.InstancePut{Architecture: "amd64"}, + } + exp.GetInstance(hostName).Return(inst, lxdtesting.ETag, nil) // Arrangements for the container destruction. stopReq := lxdapi.InstanceStatePut{ @@ -174,6 +179,8 @@ func (s *managerSuite) TestContainerCreateDestroy(c *gc.C) { instanceId := instance.Id() c.Check(string(instanceId), gc.Equals, hostName) + c.Check(hc.Arch, gc.NotNil) + c.Check(*hc.Arch, gc.Equals, "amd64") instanceStatus := instance.Status(context.NewEmptyCloudCallContext()) c.Check(instanceStatus.Status, gc.Equals, status.Running) @@ -211,7 +218,12 @@ func (s *managerSuite) TestContainerCreateUpdateIPv4Network(c *gc.C) { s.expectStartOp(ctrl) exp.UpdateInstanceState(hostName, lxdapi.InstanceStatePut{Action: "start", Timeout: -1}, "").Return(s.startOp, nil) - exp.GetInstance(hostName).Return(&lxdapi.Instance{Name: hostName, Type: "container"}, lxdtesting.ETag, nil) + inst := &lxdapi.Instance{ + Name: hostName, + Type: "container", + InstancePut: lxdapi.InstancePut{Architecture: "amd64"}, + } + exp.GetInstance(hostName).Return(inst, lxdtesting.ETag, nil) // Supplying config for a single device with default bridge and without a // CIDR will cause the default bridge to be updated with IPv4 config. From 4d1b7f6d2c8d5401b70137c80e9734304ac920b5 Mon Sep 17 00:00:00 2001 From: Heather Lanigan Date: Thu, 9 May 2024 14:33:50 -0400 Subject: [PATCH 04/12] Update container architecture on machine import. Fixing a bug where containers archicture has not been saved previously. Need to determine the charm platform, architecture/os/channel, when using machine scoped placements directives in deploy. Assume that the container has the same architecture as the host machine. This is hardcoded for KVM containers today and what is supported for LXC. 
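The rule shared by this import fix and the upgrade step that follows can be
stated as a tiny stand-alone helper: prefer the architecture recorded for the
container itself, and only fall back to the host machine's value. This is an
illustrative sketch, not the state package's API.

    package main

    import "fmt"

    // containerArch picks the architecture to store for a container: its own
    // recorded value if present, otherwise the host machine's, otherwise none.
    func containerArch(recorded, hostArch string) (string, bool) {
    	if recorded != "" {
    		return recorded, true
    	}
    	if hostArch != "" {
    		return hostArch, true
    	}
    	return "", false // nothing trustworthy to record
    }

    func main() {
    	fmt.Println(containerArch("arm64", "amd64")) // container's own value wins
    	fmt.Println(containerArch("", "amd64"))      // inherited from the host
    	fmt.Println(containerArch("", ""))           // left unset
    }

Treating the host's architecture as authoritative is safe here because, as
noted above, containers do not run a different architecture than the machine
hosting them.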
--- state/migration_import.go | 18 ++++++--- state/migration_import_test.go | 70 +++++++++++++++++++++------------- 2 files changed, 57 insertions(+), 31 deletions(-) diff --git a/state/migration_import.go b/state/migration_import.go index 755b0bcd863..f1357523eec 100644 --- a/state/migration_import.go +++ b/state/migration_import.go @@ -449,7 +449,7 @@ func (i *importer) modelUsers() error { func (i *importer) machines() error { i.logger.Debugf("importing machines") for _, m := range i.model.Machines() { - if err := i.machine(m); err != nil { + if err := i.machine(m, ""); err != nil { i.logger.Errorf("error importing machine: %s", err) return errors.Annotate(err, m.Id()) } @@ -459,7 +459,7 @@ func (i *importer) machines() error { return nil } -func (i *importer) machine(m description.Machine) error { +func (i *importer) machine(m description.Machine, arch string) error { // Import this machine, then import its containers. i.logger.Debugf("importing machine %s", m.Id()) @@ -516,7 +516,7 @@ func (i *importer) machine(m description.Machine) error { ) // 3. create op for adding in instance data - prereqOps = append(prereqOps, i.machineInstanceOp(mdoc, instance)) + prereqOps = append(prereqOps, i.machineInstanceOp(mdoc, instance, arch)) if parentId := container.ParentId(mdoc.Id); parentId != "" { prereqOps = append(prereqOps, @@ -557,7 +557,9 @@ func (i *importer) machine(m description.Machine) error { // Now that this machine exists in the database, process each of the // containers in this machine. for _, container := range m.Containers() { - if err := i.machine(container); err != nil { + // Pass the parent machine's architecture when creating an op to fix + // a container's data. + if err := i.machine(container, m.Instance().Architecture()); err != nil { return errors.Annotate(err, container.Id()) } } @@ -653,7 +655,11 @@ func (i *importer) applicationPortsOp(a description.Application) txn.Op { } } -func (i *importer) machineInstanceOp(mdoc *machineDoc, inst description.CloudInstance) txn.Op { +// machineInstanceOp creates for txn operation for inserting a doc into +// instance data collection. The parentArch is included to fix data from +// older versions of juju where the architecture of a container was left +// empty. 
+func (i *importer) machineInstanceOp(mdoc *machineDoc, inst description.CloudInstance, parentArch string) txn.Op { doc := &instanceData{ DocID: mdoc.DocID, MachineId: mdoc.Id, @@ -664,6 +670,8 @@ func (i *importer) machineInstanceOp(mdoc *machineDoc, inst description.CloudIns if arch := inst.Architecture(); arch != "" { doc.Arch = &arch + } else if parentArch != "" { + doc.Arch = &parentArch } if mem := inst.Memory(); mem != 0 { doc.Mem = &mem diff --git a/state/migration_import_test.go b/state/migration_import_test.go index 67bcff81150..e9ff9a914f8 100644 --- a/state/migration_import_test.go +++ b/state/migration_import_test.go @@ -324,41 +324,47 @@ func (s *MigrationImportSuite) TestMeterStatusNotAvailable(c *gc.C) { } func (s *MigrationImportSuite) AssertMachineEqual(c *gc.C, newMachine, oldMachine *state.Machine) { - c.Assert(newMachine.Id(), gc.Equals, oldMachine.Id()) - c.Assert(newMachine.Principals(), jc.DeepEquals, oldMachine.Principals()) - c.Assert(newMachine.Base().String(), gc.Equals, oldMachine.Base().String()) - c.Assert(newMachine.ContainerType(), gc.Equals, oldMachine.ContainerType()) + c.Check(newMachine.Id(), gc.Equals, oldMachine.Id()) + c.Check(newMachine.Principals(), jc.DeepEquals, oldMachine.Principals()) + c.Check(newMachine.Base().String(), gc.Equals, oldMachine.Base().String()) + c.Check(newMachine.ContainerType(), gc.Equals, oldMachine.ContainerType()) newHardware, err := newMachine.HardwareCharacteristics() c.Assert(err, jc.ErrorIsNil) oldHardware, err := oldMachine.HardwareCharacteristics() c.Assert(err, jc.ErrorIsNil) - c.Assert(newHardware, jc.DeepEquals, oldHardware) - c.Assert(newMachine.Jobs(), jc.DeepEquals, oldMachine.Jobs()) - c.Assert(newMachine.Life(), gc.Equals, oldMachine.Life()) + if oldMachine.ContainerType() != instance.NONE && oldHardware.Arch == nil { + // test that containers have an architecture added during import + // if not already there. 
+ oldHardware.Arch = strPtr("amd64") + } + c.Check(newHardware, jc.DeepEquals, oldHardware, gc.Commentf("machine %q", newMachine.Id())) + + c.Check(newMachine.Jobs(), jc.DeepEquals, oldMachine.Jobs()) + c.Check(newMachine.Life(), gc.Equals, oldMachine.Life()) newTools, err := newMachine.AgentTools() c.Assert(err, jc.ErrorIsNil) oldTools, err := oldMachine.AgentTools() c.Assert(err, jc.ErrorIsNil) - c.Assert(newTools, jc.DeepEquals, oldTools) + c.Check(newTools, jc.DeepEquals, oldTools) oldStatus, err := oldMachine.Status() c.Assert(err, jc.ErrorIsNil) newStatus, err := newMachine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(newStatus, jc.DeepEquals, oldStatus) + c.Check(newStatus, jc.DeepEquals, oldStatus) oldInstID, oldInstDisplayName, err := oldMachine.InstanceNames() c.Assert(err, jc.ErrorIsNil) newInstID, newInstDisplayName, err := newMachine.InstanceNames() c.Assert(err, jc.ErrorIsNil) - c.Assert(newInstID, gc.Equals, oldInstID) - c.Assert(newInstDisplayName, gc.Equals, oldInstDisplayName) + c.Check(newInstID, gc.Equals, oldInstID) + c.Check(newInstDisplayName, gc.Equals, oldInstDisplayName) oldStatus, err = oldMachine.InstanceStatus() c.Assert(err, jc.ErrorIsNil) newStatus, err = newMachine.InstanceStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(newStatus, jc.DeepEquals, oldStatus) + c.Check(newStatus, jc.DeepEquals, oldStatus) } func (s *MigrationImportSuite) TestMachines(c *gc.C) { @@ -373,6 +379,8 @@ func (s *MigrationImportSuite) TestMachines(c *gc.C) { machine1 := s.Factory.MakeMachine(c, &factory.MachineParams{ Constraints: cons, Characteristics: &instance.HardwareCharacteristics{ + Arch: cons.Arch, + Mem: cons.Mem, RootDiskSource: &source, }, DisplayName: displayName, @@ -388,16 +396,22 @@ func (s *MigrationImportSuite) TestMachines(c *gc.C) { c.Assert(hardware, gc.NotNil) _ = s.Factory.MakeMachineNested(c, machine1.Id(), nil) + _ = s.Factory.MakeMachineNested(c, machine1.Id(), &factory.MachineParams{ + Constraints: constraints.MustParse("arch=arm64"), + Characteristics: &instance.HardwareCharacteristics{ + Arch: cons.Arch, + }, + }) allMachines, err := s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) - c.Assert(allMachines, gc.HasLen, 2) + c.Assert(allMachines, gc.HasLen, 3) newModel, newSt := s.importModel(c, s.State) importedMachines, err := newSt.AllMachines() c.Assert(err, jc.ErrorIsNil) - c.Assert(importedMachines, gc.HasLen, 2) + c.Assert(importedMachines, gc.HasLen, 3) // AllMachines returns the machines in the same order, yay us. for i, newMachine := range importedMachines { @@ -406,30 +420,34 @@ func (s *MigrationImportSuite) TestMachines(c *gc.C) { // And a few extra checks. parent := importedMachines[0] - container := importedMachines[1] containers, err := parent.Containers() c.Assert(err, jc.ErrorIsNil) - c.Assert(containers, jc.DeepEquals, []string{container.Id()}) - parentId, isContainer := container.ParentId() - c.Assert(parentId, gc.Equals, parent.Id()) - c.Assert(isContainer, jc.IsTrue) + c.Assert(containers, jc.SameContents, []string{importedMachines[1].Id(), importedMachines[2].Id()}) + for _, cont := range []*state.Machine{importedMachines[1], importedMachines[2]} { + parentId, isContainer := cont.ParentId() + c.Assert(parentId, gc.Equals, parent.Id()) + c.Assert(isContainer, jc.IsTrue) + } s.assertAnnotations(c, newModel, parent) s.checkStatusHistory(c, machine1, parent, 5) newCons, err := parent.Constraints() - c.Assert(err, jc.ErrorIsNil) - // Can't test the constraints directly, so go through the string repr. 
- c.Assert(newCons.String(), gc.Equals, cons.String()) + if c.Check(err, jc.ErrorIsNil) { + // Can't test the constraints directly, so go through the string repr. + c.Check(newCons.String(), gc.Equals, cons.String()) + } // Test the modification status is set to the initial state. modStatus, err := parent.ModificationStatus() - c.Assert(err, jc.ErrorIsNil) - c.Assert(modStatus.Status, gc.Equals, status.Idle) + if c.Check(err, jc.ErrorIsNil) { + c.Check(modStatus.Status, gc.Equals, status.Idle) + } characteristics, err := parent.HardwareCharacteristics() - c.Assert(err, jc.ErrorIsNil) - c.Assert(*characteristics.RootDiskSource, gc.Equals, "bunyan") + if c.Check(err, jc.ErrorIsNil) { + c.Check(*characteristics.RootDiskSource, gc.Equals, "bunyan") + } } func (s *MigrationImportSuite) TestMachineDevices(c *gc.C) { From 825d8d9e2ab81341d5c1533befcb8149841c8d65 Mon Sep 17 00:00:00 2001 From: Heather Lanigan Date: Thu, 9 May 2024 15:44:00 -0400 Subject: [PATCH 05/12] Upgrade step to add architecture to containers instance data. Use architecture of the host to fill in container's instance data. It's the only architecture supported by LXD and hardcoded for KVM. Fixes a bug where containers not having architectures lead to a panic in deploying charms with machine scoped placement. --- state/upgrades.go | 57 ++++++++++++++++++++++++++++++ state/upgrades_test.go | 72 ++++++++++++++++++++++++++++++++++++++ upgrades/backend.go | 5 +++ upgrades/operations.go | 2 ++ upgrades/steps_335.go | 20 +++++++++++ upgrades/steps_335_test.go | 26 ++++++++++++++ upgrades/upgrade_test.go | 2 ++ 7 files changed, 184 insertions(+) create mode 100644 upgrades/steps_335.go create mode 100644 upgrades/steps_335_test.go diff --git a/state/upgrades.go b/state/upgrades.go index f0440d253bc..f5b81d74d48 100644 --- a/state/upgrades.go +++ b/state/upgrades.go @@ -12,6 +12,7 @@ import ( "github.com/juju/names/v5" "github.com/juju/juju/core/charm" + "github.com/juju/juju/core/container" ) // Until we add 3.0 upgrade steps, keep static analysis happy. @@ -180,3 +181,59 @@ func FillInEmptyCharmhubTracks(pool *StatePool) error { return nil })) } + +// AssignArchToContainers assigns an architecture to container +// instance data based on their host. +func AssignArchToContainers(pool *StatePool) error { + return errors.Trace(runForAllModelStates(pool, func(st *State) error { + instData, closer := st.db().GetCollection(instanceDataC) + defer closer() + + var containers []instanceData + if err := instData.Find(bson.M{"machineid": bson.M{"$regex": "[0-9]+/[a-z]+/[0-9]+"}}).All(&containers); err != nil { + return errors.Annotatef(err, "failed to read container instance data") + } + // Nothing to do if no containers in the model. 
+ if len(containers) == 0 { + return nil + } + + var machines []instanceData + if err := instData.Find(bson.D{{"machineid", bson.D{{"$regex", "^[0-9]+$"}}}}).All(&machines); err != nil { + return errors.Annotatef(err, "failed to read machine instance data") + } + + machineArch := make(map[string]string, len(machines)) + + for _, machine := range machines { + if machine.Arch == nil { + logger.Errorf("no architecture for machine %q", machine.MachineId) + } + machineArch[machine.MachineId] = *machine.Arch + } + + var ops []txn.Op + for _, cont := range containers { + if cont.Arch != nil { + continue + } + mID := container.ParentId(cont.MachineId) + a, ok := machineArch[mID] + if !ok { + logger.Errorf("no instance data for machine %q, but has container %q", mID, cont.MachineId) + continue + } + ops = append(ops, txn.Op{ + C: instanceDataC, + Id: cont.DocID, + Assert: txn.DocExists, + Update: bson.M{"$set": bson.M{"arch": &a}}, + }) + } + + if len(ops) > 0 { + return errors.Trace(st.runRawTransaction(ops)) + } + return nil + })) +} diff --git a/state/upgrades_test.go b/state/upgrades_test.go index 7a34ab1fc2a..50ef0148cf7 100644 --- a/state/upgrades_test.go +++ b/state/upgrades_test.go @@ -255,3 +255,75 @@ func (s *upgradesSuite) TestFillInEmptyCharmhubTracks(c *gc.C) { expectedData := upgradedData(applications, expected) s.assertUpgradedData(c, FillInEmptyCharmhubTracks, expectedData) } + +func (s *upgradesSuite) TestAssignArchToContainers(c *gc.C) { + st := s.state + instanceDataColl, closer := st.db().GetRawCollection(instanceDataC) + defer closer() + + err := instanceDataColl.Insert(bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "0"), + "model-uuid": st.ModelUUID(), + "machineid": "0", + "arch": "arm64", + }) + c.Assert(err, jc.ErrorIsNil) + err = instanceDataColl.Insert(bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "0/lxd/7"), + "model-uuid": st.ModelUUID(), + "machineid": "0/lxd/7", + }) + c.Assert(err, jc.ErrorIsNil) + err = instanceDataColl.Insert(bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "2"), + "model-uuid": st.ModelUUID(), + "machineid": "2", + "arch": "amd64", + }) + c.Assert(err, jc.ErrorIsNil) + err = instanceDataColl.Insert(bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "3"), + "model-uuid": st.ModelUUID(), + "machineid": "3", + "arch": "amd64", + }) + c.Assert(err, jc.ErrorIsNil) + err = instanceDataColl.Insert(bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "3/kvm/2"), + "model-uuid": st.ModelUUID(), + "machineid": "3/kvm/2", + }) + c.Assert(err, jc.ErrorIsNil) + + var expected bsonMById + expected = append(expected, bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "0"), + "model-uuid": st.ModelUUID(), + "machineid": "0", + "arch": "arm64", + }, bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "0/lxd/7"), + "model-uuid": st.ModelUUID(), + "machineid": "0/lxd/7", + "arch": "arm64", + }, bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "2"), + "model-uuid": st.ModelUUID(), + "machineid": "2", + "arch": "amd64", + }, bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "3"), + "model-uuid": st.ModelUUID(), + "machineid": "3", + "arch": "amd64", + }, bson.M{ + "_id": ensureModelUUID(st.ModelUUID(), "3/kvm/2"), + "model-uuid": st.ModelUUID(), + "machineid": "3/kvm/2", + "arch": "amd64", + }) + + sort.Sort(expected) + expectedData := upgradedData(instanceDataColl, expected) + s.assertUpgradedData(c, AssignArchToContainers, expectedData) +} diff --git a/upgrades/backend.go b/upgrades/backend.go index 65b405111c4..304a999ea91 100644 --- a/upgrades/backend.go +++ 
b/upgrades/backend.go @@ -13,6 +13,7 @@ import ( type StateBackend interface { ConvertApplicationOfferTokenKeys() error FillInEmptyCharmhubTracks() error + AssignArchToContainers() error } // Model is an interface providing access to the details of a model within the @@ -38,3 +39,7 @@ func (s stateBackend) ConvertApplicationOfferTokenKeys() error { func (s stateBackend) FillInEmptyCharmhubTracks() error { return state.FillInEmptyCharmhubTracks(s.pool) } + +func (s stateBackend) AssignArchToContainers() error { + return state.AssignArchToContainers(s.pool) +} diff --git a/upgrades/operations.go b/upgrades/operations.go index 2b7d4834019..d4f52a9cb63 100644 --- a/upgrades/operations.go +++ b/upgrades/operations.go @@ -21,6 +21,7 @@ var stateUpgradeOperations = func() []Operation { steps := []Operation{ upgradeToVersion{version.MustParse("3.3.1"), stateStepsFor331()}, upgradeToVersion{version.MustParse("3.3.4"), stateStepsFor334()}, + upgradeToVersion{version.MustParse("3.3.5"), stateStepsFor335()}, } return steps } @@ -32,6 +33,7 @@ var upgradeOperations = func() []Operation { steps := []Operation{ upgradeToVersion{version.MustParse("3.3.1"), stepsFor331()}, upgradeToVersion{version.MustParse("3.3.4"), stepsFor334()}, + upgradeToVersion{version.MustParse("3.3.5"), stepsFor335()}, } return steps } diff --git a/upgrades/steps_335.go b/upgrades/steps_335.go new file mode 100644 index 00000000000..0b5d93a7d7e --- /dev/null +++ b/upgrades/steps_335.go @@ -0,0 +1,20 @@ +// Copyright 2024 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades + +func stepsFor335() []Step { + return []Step{} +} + +func stateStepsFor335() []Step { + return []Step{ + &upgradeStep{ + description: "assign architectures to container's instance data", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return context.State().AssignArchToContainers() + }, + }, + } +} diff --git a/upgrades/steps_335_test.go b/upgrades/steps_335_test.go new file mode 100644 index 00000000000..63b62cc1b4f --- /dev/null +++ b/upgrades/steps_335_test.go @@ -0,0 +1,26 @@ +// Copyright 2024 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package upgrades_test + +import ( + jc "github.com/juju/testing/checkers" + "github.com/juju/version/v2" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/upgrades" +) + +var v335 = version.MustParse("3.3.5") + +type steps335Suite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&steps335Suite{}) + +func (s *steps335Suite) TestAssignArchToContainers(c *gc.C) { + step := findStateStep(c, v335, "assign architectures to container's instance data") + c.Assert(step.Targets(), jc.DeepEquals, []upgrades.Target{upgrades.DatabaseMaster}) +} diff --git a/upgrades/upgrade_test.go b/upgrades/upgrade_test.go index 9fb9f0f3869..19ef402bb31 100644 --- a/upgrades/upgrade_test.go +++ b/upgrades/upgrade_test.go @@ -599,6 +599,7 @@ func (s *upgradeSuite) TestStateUpgradeOperationsVersions(c *gc.C) { c.Assert(versions, gc.DeepEquals, []string{ "3.3.1", "3.3.4", + "3.3.5", }) } @@ -607,6 +608,7 @@ func (s *upgradeSuite) TestUpgradeOperationsVersions(c *gc.C) { c.Assert(versions, gc.DeepEquals, []string{ "3.3.1", "3.3.4", + "3.3.4", }) } From f8779fbec944c9df20042db56fe3899e13bfdf99 Mon Sep 17 00:00:00 2001 From: Caner Derici Date: Mon, 13 May 2024 10:56:48 -0600 Subject: [PATCH 06/12] Follow up on controllercharm prometheus test fix --- tests/suites/controllercharm/prometheus.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/suites/controllercharm/prometheus.sh b/tests/suites/controllercharm/prometheus.sh index e2cbed9ec49..c333b402a27 100644 --- a/tests/suites/controllercharm/prometheus.sh +++ b/tests/suites/controllercharm/prometheus.sh @@ -67,14 +67,13 @@ run_prometheus_multiple_units() { juju status --format json | jq -r "$(active_condition "p2" 1)" | check "p2" juju remove-relation p1 controller - # Wait until the application p1 settles before health checks - wait_for "p1" "$(active_condition "p1" 0)" # Check Juju controller is removed from Prometheus targets retry 'check_prometheus_no_target p1 0' 5 # Check no errors in controller charm or Prometheus juju status -m controller --format json | jq -r "$(active_condition "controller")" | check "controller" - juju status --format json | jq -r "$(active_condition "p1" 0)" | check "p1" + # Ensure p1 is still healty + wait_for "p1" "$(active_condition "p1" 0)" juju remove-application p1 --destroy-storage \ --force --no-wait # TODO: remove these flags once storage bug is fixed From 5399523ddf59d2ae74f16603e7861803397a21c2 Mon Sep 17 00:00:00 2001 From: Heather Lanigan Date: Fri, 10 May 2024 10:55:28 -0400 Subject: [PATCH 07/12] Improve run_deploy_charm_placement_directive. Add a container to the mix due to LP:2064174. Due to issues with nested LXD containers, use a virtual-machine for the host machine when testing against the lxd provider only. Added a check to see that the machine's base was used to deploy the charm. Implement a wait_for_container_agent_status for use in the updated test. --- tests/includes/wait-for.sh | 33 ++++++++++++++++++++++++++++ tests/suites/deploy/deploy_charms.sh | 31 +++++++++++++++++++++----- 2 files changed, 58 insertions(+), 6 deletions(-) diff --git a/tests/includes/wait-for.sh b/tests/includes/wait-for.sh index 7fef3fc846d..24fdb1fe29a 100644 --- a/tests/includes/wait-for.sh +++ b/tests/includes/wait-for.sh @@ -172,6 +172,39 @@ wait_for_machine_agent_status() { fi } +# wait_for_container_agent_status blocks until the machine agent for the specified +# machine instance ID reports the requested status. 
+# +# ``` +# wait_for_container_agent_status +# +# example: +# wait_for_container_agent_status "0/lxd/0 "started" +# ``` +wait_for_container_agent_status() { + local inst_id status + + inst_id=${1} + status=${2} + + parent_id=$(echo "${inst_id}" | awk 'BEGIN {FS="/";} {print $1}') + + attempt=0 + # shellcheck disable=SC2046,SC2143 + until [ $(juju show-machine --format json | jq -r ".[\"machines\"] | .[\"${parent_id}\"] | .[\"containers\"] | .[\"${inst_id}\"] | .[\"juju-status\"] | .[\"current\"]" | grep "${status}") ]; do + echo "[+] (attempt ${attempt}) polling machines" + juju machines | grep "$inst_id" 2>&1 | sed 's/^/ | /g' + sleep "${SHORT_TIMEOUT}" + attempt=$((attempt + 1)) + done + + if [[ ${attempt} -gt 0 ]]; then + echo "[+] $(green 'Completed polling machines')" + juju machines | grep "$inst_id" 2>&1 | sed 's/^/ | /g' + sleep "${SHORT_TIMEOUT}" + fi +} + # wait_for_machine_netif_count blocks until the number of detected network # interfaces for the requested machine instance ID becomes equal to the desired # value. diff --git a/tests/suites/deploy/deploy_charms.sh b/tests/suites/deploy/deploy_charms.sh index 65d16d37ed0..879968e8870 100644 --- a/tests/suites/deploy/deploy_charms.sh +++ b/tests/suites/deploy/deploy_charms.sh @@ -21,12 +21,31 @@ run_deploy_charm_placement_directive() { ensure "test-deploy-charm-placement-directive" "${file}" - juju add-machine --base ubuntu@20.04 - juju deploy jameinel-ubuntu-lite --to 0 + expected_base="ubuntu@20.04" + # Setup machines for placement based on provider used for test. + # Container in container doesn't work consistently enough, + # for test. Use kvm via lxc instead. + if [[ ${BOOTSTRAP_PROVIDER} == "lxd" ]] || [[ ${BOOTSTRAP_PROVIDER} == "localhost" ]]; then + juju add-machine --base "${expected_base}" --constraints="virt-type=virtual-machine" + else + juju add-machine --base "${expected_base}" + fi + + juju add-machine lxd:0 --base "${expected_base}" + + # If 0/lxd/0 is started, so must machine 0 be. + wait_for_container_agent_status "0/lxd/0" "started" + + juju deploy ubuntu-lite -n 2 --to 0,0/lxd/0 wait_for "ubuntu-lite" "$(idle_condition "ubuntu-lite")" - destroy_model "test-deploy-charm-placement-directive" + # Verify based used to create the machines was used during + # deploy. + base_name=$(juju status --format=json | jq -r '.applications."ubuntu-lite".base.name') + base_chan=$(juju status --format=json | jq -r '.applications."ubuntu-lite".base.channel') + echo "$base_name@$base_chan" | check "$expected_base" + destroy_model "test-deploy-charm-placement-directive" } run_deploy_charm_unsupported_series() { @@ -57,10 +76,10 @@ run_deploy_specific_series() { # Have to check against default base, to avoid false positives. # These two bases should be different. 
 	default_base="ubuntu@22.04"
-	specific_base="ubuntu@20.04"
+	expected_base="ubuntu@20.04"
 	juju deploy "$charm_name" app1
-	juju deploy "$charm_name" app2 --base "$specific_base"
+	juju deploy "$charm_name" app2 --base "$expected_base"
 	base_name1=$(juju status --format=json | jq -r ".applications.app1.base.name")
 	base_chan1=$(juju status --format=json | jq -r ".applications.app1.base.channel")
 	base_name2=$(juju status --format=json | jq -r ".applications.app2.base.name")
 	base_chan2=$(juju status --format=json | jq -r ".applications.app2.base.channel")
@@ -69,7 +88,7 @@ run_deploy_specific_series() {
 	destroy_model "test-deploy-specific-series"
 	echo "$base_name1@$base_chan1" | check "$default_base"
-	echo "$base_name2@$base_chan2" | check "$specific_base"
+	echo "$base_name2@$base_chan2" | check "$expected_base"
 }
 run_deploy_lxd_profile_charm() {

From aa6b302137520bc2258d7e41697b9341450604e2 Mon Sep 17 00:00:00 2001
From: Heather Lanigan
Date: Tue, 14 May 2024 16:25:13 +0200
Subject: [PATCH 08/12] Add error log message when placement platforms don't match.

We know the next caller will fail the deploy if the platforms don't match.
Put the mismatched platforms in an error log for folks to find.

---
 apiserver/facades/client/application/deployrepository.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/apiserver/facades/client/application/deployrepository.go b/apiserver/facades/client/application/deployrepository.go
index 161d9d1ede7..93e2d588413 100644
--- a/apiserver/facades/client/application/deployrepository.go
+++ b/apiserver/facades/client/application/deployrepository.go
@@ -741,6 +741,9 @@ func (v *deployFromRepositoryValidator) platformFromPlacement(placements []*inst
 		platform = p
 		platStrings.Add(p.String())
 	}
+	if platStrings.Size() != 1 {
+		deployRepoLogger.Errorf("Mismatched platforms for machine scoped placements %s", platStrings.SortedValues())
+	}
 	return &platform, platStrings.Size() == 1, nil
 }

From 92cafdaaadbed6d779f38834e8f0ced30326d294 Mon Sep 17 00:00:00 2001
From: Heather Lanigan
Date: Tue, 14 May 2024 16:26:17 +0200
Subject: [PATCH 09/12] Remove test which is commented out and no longer needed.
--- .../application/deployrepository_test.go | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/apiserver/facades/client/application/deployrepository_test.go b/apiserver/facades/client/application/deployrepository_test.go index d7ee0b49f72..5e65e931a96 100644 --- a/apiserver/facades/client/application/deployrepository_test.go +++ b/apiserver/facades/client/application/deployrepository_test.go @@ -647,28 +647,6 @@ func (s *validatorSuite) TestDeducePlatformSimple(c *gc.C) { c.Assert(plat, gc.DeepEquals, corecharm.Platform{Architecture: "amd64"}) } -//func (s *validatorSuite) TestDeducePlatformRiskInChannel(c *gc.C) { -// defer s.setupMocks(c).Finish() -// //model constraint default -// s.state.EXPECT().ModelConstraints().Return(constraints.Value{Arch: strptr("amd64")}, nil) -// -// arg := params.DeployFromRepositoryArg{ -// CharmName: "testme", -// Base: ¶ms.Base{ -// Name: "ubuntu", -// Channel: "22.10/stable", -// }, -// } -// plat, usedModelDefaultBase, err := s.getValidator().deducePlatform(arg) -// c.Assert(err, gc.IsNil) -// c.Assert(usedModelDefaultBase, jc.IsFalse) -// c.Assert(plat, gc.DeepEquals, corecharm.Platform{ -// Architecture: "amd64", -// OS: "ubuntu", -// Channel: "22.10", -// }) -//} - func (s *validatorSuite) TestDeducePlatformArgArchBase(c *gc.C) { defer s.setupMocks(c).Finish() From 31e24f1f4630285498decb3cdbf6740afcd59bdb Mon Sep 17 00:00:00 2001 From: Heather Lanigan Date: Tue, 14 May 2024 17:01:38 +0200 Subject: [PATCH 10/12] Remove un-necessary upgrade scaffolding. Leave one behind with additional comments on how and when to use. The non state upgrades are rarely necessary. --- upgrades/operations.go | 2 -- upgrades/steps_331.go | 4 ++++ upgrades/steps_334.go | 4 ---- upgrades/steps_335.go | 4 ---- upgrades/upgrade_test.go | 2 -- 5 files changed, 4 insertions(+), 12 deletions(-) diff --git a/upgrades/operations.go b/upgrades/operations.go index d4f52a9cb63..a9db1823e00 100644 --- a/upgrades/operations.go +++ b/upgrades/operations.go @@ -32,8 +32,6 @@ var stateUpgradeOperations = func() []Operation { var upgradeOperations = func() []Operation { steps := []Operation{ upgradeToVersion{version.MustParse("3.3.1"), stepsFor331()}, - upgradeToVersion{version.MustParse("3.3.4"), stepsFor334()}, - upgradeToVersion{version.MustParse("3.3.5"), stepsFor335()}, } return steps } diff --git a/upgrades/steps_331.go b/upgrades/steps_331.go index a69b2a32292..055e0dbb6a2 100644 --- a/upgrades/steps_331.go +++ b/upgrades/steps_331.go @@ -3,6 +3,10 @@ package upgrades +// stepsFor331 is a stub for how to write non-state upgrade tests. These are +// rarely necessary. They have been used in the past for upgrade steps where +// changes to workload machines are necessary. E.g. renaming the directory +// where agent binaries are placed in /var/lib/juju. 
func stepsFor331() []Step { return []Step{} } diff --git a/upgrades/steps_334.go b/upgrades/steps_334.go index 7a357f60b3d..bf24fec0e8d 100644 --- a/upgrades/steps_334.go +++ b/upgrades/steps_334.go @@ -3,10 +3,6 @@ package upgrades -func stepsFor334() []Step { - return []Step{} -} - func stateStepsFor334() []Step { return []Step{ &upgradeStep{ diff --git a/upgrades/steps_335.go b/upgrades/steps_335.go index 0b5d93a7d7e..2bde61ebd20 100644 --- a/upgrades/steps_335.go +++ b/upgrades/steps_335.go @@ -3,10 +3,6 @@ package upgrades -func stepsFor335() []Step { - return []Step{} -} - func stateStepsFor335() []Step { return []Step{ &upgradeStep{ diff --git a/upgrades/upgrade_test.go b/upgrades/upgrade_test.go index 19ef402bb31..1fd31e764b0 100644 --- a/upgrades/upgrade_test.go +++ b/upgrades/upgrade_test.go @@ -607,8 +607,6 @@ func (s *upgradeSuite) TestUpgradeOperationsVersions(c *gc.C) { versions := extractUpgradeVersions(c, (*upgrades.UpgradeOperations)()) c.Assert(versions, gc.DeepEquals, []string{ "3.3.1", - "3.3.4", - "3.3.4", }) } From 6aacbb4ea0e2b34623a6a574720e77eb7134fb51 Mon Sep 17 00:00:00 2001 From: Heather Lanigan Date: Tue, 14 May 2024 17:37:28 +0200 Subject: [PATCH 11/12] Update Machine interface. --- .../client/application/mocks/application_mock.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/apiserver/facades/client/application/mocks/application_mock.go b/apiserver/facades/client/application/mocks/application_mock.go index 07d87ece0b2..0dde10e86ac 100644 --- a/apiserver/facades/client/application/mocks/application_mock.go +++ b/apiserver/facades/client/application/mocks/application_mock.go @@ -2364,6 +2364,20 @@ func (mr *MockMachineMockRecorder) HardwareCharacteristics() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HardwareCharacteristics", reflect.TypeOf((*MockMachine)(nil).HardwareCharacteristics)) } +// Id mocks base method. +func (m *MockMachine) Id() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Id") + ret0, _ := ret[0].(string) + return ret0 +} + +// Id indicates an expected call of Id. +func (mr *MockMachineMockRecorder) Id() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Id", reflect.TypeOf((*MockMachine)(nil).Id)) +} + // IsLockedForSeriesUpgrade mocks base method. func (m *MockMachine) IsLockedForSeriesUpgrade() (bool, error) { m.ctrl.T.Helper() From bca46e01e3416f7f4e32096af2533db81a706ef2 Mon Sep 17 00:00:00 2001 From: Heather Lanigan Date: Wed, 15 May 2024 14:49:51 +0200 Subject: [PATCH 12/12] Add an 3.4.3 upgrade step. Using the AssignArchToContainers method to fix the instance data of containers. The method was merged forward from 3.3.5. 
However we need a different upgrade step in 3.4 --- upgrades/operations.go | 1 + upgrades/steps_343.go | 16 ++++++++++++++++ upgrades/steps_343_test.go | 26 ++++++++++++++++++++++++++ upgrades/upgrade_test.go | 1 + 4 files changed, 44 insertions(+) create mode 100644 upgrades/steps_343.go create mode 100644 upgrades/steps_343_test.go diff --git a/upgrades/operations.go b/upgrades/operations.go index 3d758d08abd..20f7d642fc5 100644 --- a/upgrades/operations.go +++ b/upgrades/operations.go @@ -20,6 +20,7 @@ import ( var stateUpgradeOperations = func() []Operation { steps := []Operation{ upgradeToVersion{version.MustParse("3.4.1"), stateStepsFor341()}, + upgradeToVersion{version.MustParse("3.4.3"), stateStepsFor343()}, } return steps } diff --git a/upgrades/steps_343.go b/upgrades/steps_343.go new file mode 100644 index 00000000000..0442aaa5939 --- /dev/null +++ b/upgrades/steps_343.go @@ -0,0 +1,16 @@ +// Copyright 2024 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades + +func stateStepsFor343() []Step { + return []Step{ + &upgradeStep{ + description: "assign architectures to container's instance data", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return context.State().AssignArchToContainers() + }, + }, + } +} diff --git a/upgrades/steps_343_test.go b/upgrades/steps_343_test.go new file mode 100644 index 00000000000..f8c860bf486 --- /dev/null +++ b/upgrades/steps_343_test.go @@ -0,0 +1,26 @@ +// Copyright 2024 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades_test + +import ( + jc "github.com/juju/testing/checkers" + "github.com/juju/version/v2" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/upgrades" +) + +var v343 = version.MustParse("3.4.3") + +type steps342Suite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&steps342Suite{}) + +func (s *steps342Suite) TestAssignArchToContainers(c *gc.C) { + step := findStateStep(c, v343, "assign architectures to container's instance data") + c.Assert(step.Targets(), jc.DeepEquals, []upgrades.Target{upgrades.DatabaseMaster}) +} diff --git a/upgrades/upgrade_test.go b/upgrades/upgrade_test.go index 52b25eaf94a..5e0059da9a7 100644 --- a/upgrades/upgrade_test.go +++ b/upgrades/upgrade_test.go @@ -598,6 +598,7 @@ func (s *upgradeSuite) TestStateUpgradeOperationsVersions(c *gc.C) { versions := extractUpgradeVersions(c, (*upgrades.StateUpgradeOperations)()) c.Assert(versions, gc.DeepEquals, []string{ "3.4.1", + "3.4.3", }) }
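For reference, the machine-ID matching that AssignArchToContainers (shared by
the 3.3.5 and 3.4.3 steps) builds on can be exercised in isolation. Only the
two regular expressions below are taken from the upgrade code; the helper
around them is an illustrative stand-in, while the real code uses
container.ParentId.

    package main

    import (
    	"fmt"
    	"regexp"
    	"strings"
    )

    var (
    	hostID      = regexp.MustCompile(`^[0-9]+$`)             // top-level machines: "3"
    	containerID = regexp.MustCompile(`[0-9]+/[a-z]+/[0-9]+`) // containers: "0/lxd/7", "3/kvm/2"
    )

    // hostOf returns the top-level machine a container ultimately runs on.
    func hostOf(machineID string) string {
    	return strings.SplitN(machineID, "/", 2)[0]
    }

    func main() {
    	for _, id := range []string{"3", "0/lxd/7", "3/kvm/2"} {
    		fmt.Printf("%-8s host=%-5v container=%-5v parent=%s\n",
    			id, hostID.MatchString(id), containerID.MatchString(id), hostOf(id))
    	}
    }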