pillar: Adapt pillar to use native cpupinning #4430

Closed · wants to merge 1 commit
48 changes: 17 additions & 31 deletions pkg/pillar/cmd/domainmgr/domainmgr.go
@@ -1243,38 +1243,24 @@ func setCgroupCpuset(config *types.DomainConfig, status *types.DomainStatus) err
 		log.Warnf("Failed to find cgroups directory for %s", config.DisplayName)
 		return nil
 	}
-	err = controller.Update(&specs.LinuxResources{CPU: &specs.LinuxCPU{Cpus: status.VmConfig.CPUs}})
+	// Convert a list of CPUs to a CPU string
+	cpuStrings := make([]string, 0)
+	for _, cpu := range status.VmConfig.CPUs {
+		cpuStrings = append(cpuStrings, strconv.Itoa(cpu))
+	}
+	cpuMask := strings.Join(cpuStrings, ",")
+
+	err = controller.Update(&specs.LinuxResources{CPU: &specs.LinuxCPU{Cpus: cpuMask}})
 	if err != nil {
 		log.Warnf("Failed to update CPU set for %s", config.DisplayName)
 		return err
 	}
-	log.Functionf("Adjust the cgroups cpuset of %s to %s", config.DisplayName, status.VmConfig.CPUs)
+	log.Functionf("Adjust the cgroups cpuset of %s to %v", config.DisplayName, status.VmConfig.CPUs)
 	return nil
 }
 
-// constructNonPinnedCpumaskString returns a cpumask that contains at least CPUs reserved for the system
-// services. Hence, it can never be empty.
-func constructNonPinnedCpumaskString(ctx *domainContext) string {
-	result := ""
-	for _, cpu := range ctx.cpuAllocator.GetAllFree() {
-		addToMask(cpu, &result)
-	}
-	return result
-}
-
-func addToMask(cpu int, s *string) {
-	if s == nil {
-		return
-	}
-	if *s == "" {
-		*s = fmt.Sprintf("%d", cpu)
-	} else {
-		*s = fmt.Sprintf("%s,%d", *s, cpu)
-	}
-}
 
 func updateNonPinnedCPUs(ctx *domainContext, config *types.DomainConfig, status *types.DomainStatus) error {
-	status.VmConfig.CPUs = constructNonPinnedCpumaskString(ctx)
+	status.VmConfig.CPUs = ctx.cpuAllocator.GetAllFree()
 	err := setCgroupCpuset(config, status)
 	if err != nil {
 		return errors.New("failed to redistribute CPUs between VMs, can affect the inter-VM isolation")
@@ -1292,23 +1278,23 @@ func assignCPUs(ctx *domainContext, config *types.DomainConfig, status *types.Do
 			return errors.New("failed to allocate necessary amount of CPUs")
 		}
 		for _, cpu := range cpusToAssign {
-			addToMask(cpu, &status.VmConfig.CPUs)
+			status.VmConfig.CPUs = append(status.VmConfig.CPUs, cpu)
 		}
 	} else { // VM has no pinned CPUs, assign all the CPUs from the shared set
-		status.VmConfig.CPUs = constructNonPinnedCpumaskString(ctx)
+		status.VmConfig.CPUs = ctx.cpuAllocator.GetAllFree()
 	}
 	return nil
 }
 
 // releaseCPUs releases the CPUs that were previously assigned to the VM.
 // The cpumask in the *status* is updated accordingly, and the CPUs are released in the CPUAllocator context.
 func releaseCPUs(ctx *domainContext, config *types.DomainConfig, status *types.DomainStatus) {
-	if ctx.cpuPinningSupported && config.VmConfig.CPUsPinned && status.VmConfig.CPUs != "" {
+	if ctx.cpuPinningSupported && config.VmConfig.CPUsPinned && status.VmConfig.CPUs != nil {
 		if err := ctx.cpuAllocator.Free(config.UUIDandVersion.UUID); err != nil {
 			log.Errorf("Failed to free CPUs for %s: %s", config.DisplayName, err)
 		}
 	}
-	status.VmConfig.CPUs = ""
+	status.VmConfig.CPUs = nil
 }
 
 func handleCreate(ctx *domainContext, key string, config *types.DomainConfig) {
@@ -1330,7 +1316,7 @@ func handleCreate(ctx *domainContext, key string, config *types.DomainConfig) {
 		Service: config.Service,
 	}
 
-	status.VmConfig.CPUs = ""
+	status.VmConfig.CPUs = make([]int, 0)
 
 	// Note that the -emu interface doesn't exist until after boot of the domU, but we
 	// initialize the VifList here with the VifUsed.
@@ -1545,7 +1531,7 @@ func doActivate(ctx *domainContext, config types.DomainConfig,
 			publishDomainStatus(ctx, status)
 			return
 		}
-		log.Functionf("CPUs for %s assigned: %s", config.DisplayName, status.VmConfig.CPUs)
+		log.Functionf("CPUs for %s assigned: %v", config.DisplayName, status.VmConfig.CPUs)
 	}
 
 	if errDescription := reserveAdapters(ctx, config); errDescription != nil {
@@ -1932,7 +1918,7 @@ func doCleanup(ctx *domainContext, status *types.DomainStatus) {
 			}
 			triggerCPUNotification()
 		}
-		status.VmConfig.CPUs = ""
+		status.VmConfig.CPUs = nil
 	}
 	releaseAdapters(ctx, status.IoAdapterList, status.UUIDandVersion.UUID,
 		status)
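For illustration, here is a self-contained sketch of the list-to-cpuset conversion the updated setCgroupCpuset performs before calling controller.Update (the helper name cpuListToMask is ours, not part of the PR):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// cpuListToMask joins CPU indices into the comma-separated string
// expected by the cgroups cpuset controller, e.g. [1 2 6 7] -> "1,2,6,7".
func cpuListToMask(cpus []int) string {
	cpuStrings := make([]string, 0, len(cpus))
	for _, cpu := range cpus {
		cpuStrings = append(cpuStrings, strconv.Itoa(cpu))
	}
	return strings.Join(cpuStrings, ",")
}

func main() {
	fmt.Println(cpuListToMask([]int{1, 2, 6, 7})) // prints "1,2,6,7"
}
```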
8 changes: 6 additions & 2 deletions pkg/pillar/containerd/oci.go
@@ -486,8 +486,12 @@ func (s *ociSpec) UpdateFromDomain(dom *types.DomainConfig, status *types.Domain
 	s.Linux.Resources.Memory.Limit = &m
 	s.Linux.Resources.CPU.Period = &p
 	s.Linux.Resources.CPU.Quota = &q
-	if status.VmConfig.CPUs != "" {
-		s.Linux.Resources.CPU.Cpus = status.VmConfig.CPUs
+	if len(status.VmConfig.CPUs) != 0 {
+		cpusAsString := make([]string, len(status.VmConfig.CPUs))
+		for i, cpu := range status.VmConfig.CPUs {
+			cpusAsString[i] = fmt.Sprintf("%d", cpu)
+		}
+		s.Linux.Resources.CPU.Cpus = strings.Join(cpusAsString, ",")
 	}
 
 	s.Linux.CgroupsPath = fmt.Sprintf("/%s/%s", ctrdServicesNamespace, dom.GetTaskName())
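The OCI runtime spec still takes the CPU set as a single string, so the slice is only formatted at this boundary. For a VM pinned to CPUs 1, 2, 6 and 7, the relevant fragment of the generated runtime spec would look roughly like this (values illustrative):

```json
{
  "linux": {
    "resources": {
      "cpu": {
        "cpus": "1,2,6,7"
      }
    }
  }
}
```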
20 changes: 14 additions & 6 deletions pkg/pillar/hypervisor/kvm.go
@@ -70,12 +70,6 @@ const qemuConfTemplate = `# This file is automatically generated by domainmgr
 [machine]
 type = "{{.Machine}}"
 dump-guest-core = "off"
-{{- if .DomainStatus.CPUs }}
-cpumask = "{{.DomainStatus.CPUs}}"
-{{- end -}}
-{{- if .DomainConfig.CPUsPinned }}
-cpu-pin = "on"
-{{- end -}}
 {{- if eq .Machine "virt" }}
 accel = "kvm:tcg"
 gic-version = "host"
@@ -842,6 +836,20 @@ func (ctx KvmContext) Setup(status types.DomainStatus, config types.DomainConfig
 		"-readconfig", file.Name(),
 		"-pidfile", kvmStateDir+domainName+"/pid")
 
+	// Add CPU affinity as a parameter to qemu.
+	// It cannot be configured through the .ini file, so we add it here.
+	// The arguments are in the format: -object thread-context,id=tc1,cpu-affinity=0-1,cpu-affinity=6-7
+	// The thread-context object was introduced in qemu 7.2.
+	if config.CPUsPinned {
+		// Create the thread-context object string
+		threadContext := "thread-context,id=tc1"
+		for _, cpu := range status.CPUs {
+			// Add the cpu-affinity arguments to the thread-context object
+			threadContext += fmt.Sprintf(",cpu-affinity=%d", cpu)
+		}
+		args = append(args, "-object", threadContext)
+	}
+
 	spec, err := ctx.setupSpec(&status, &config, status.OCIConfigDir)
 
 	if err != nil {
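As an example, for a VM pinned to CPUs 6 and 7 the block above would append arguments equivalent to the following (command line abbreviated, values illustrative):

```
-object thread-context,id=tc1,cpu-affinity=6,cpu-affinity=7
```

The loop emits one cpu-affinity entry per pinned CPU; qemu's thread-context object also accepts the range form (cpu-affinity=6-7) shown in the comment.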
8 changes: 6 additions & 2 deletions pkg/pillar/hypervisor/xen.go
@@ -260,8 +260,12 @@ func (ctx xenContext) CreateDomConfig(domainName string,
 		maxCpus = vCpus
 	}
 	file.WriteString(fmt.Sprintf("maxvcpus = %d\n", maxCpus))
-	if config.CPUs != "" {
-		file.WriteString(fmt.Sprintf("cpus = \"%s\"\n", config.CPUs))
+	if len(config.CPUs) > 0 {
+		cpusString := make([]string, 0)
+		for _, curCPU := range config.CPUs {
+			cpusString = append(cpusString, strconv.Itoa(curCPU))
+		}
+		file.WriteString(fmt.Sprintf("cpus = \"%s\"\n", strings.Join(cpusString, ",")))
 	}
 	if config.DeviceTree != "" {
 		file.WriteString(fmt.Sprintf("device_tree = \"%s\"\n",
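For the same example (a 4-vCPU VM pinned to CPUs 1, 2, 6 and 7), CreateDomConfig would now emit a Xen domain config fragment like this (values illustrative):

```
maxvcpus = 4
cpus = "1,2,6,7"
```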
2 changes: 1 addition & 1 deletion pkg/pillar/types/domainmgrtypes.go
@@ -242,7 +242,7 @@ type VmConfig struct {
 	ExtraArgs  string // added to bootargs
 	BootLoader string // default ""
 	// For CPU pinning
-	CPUs string // default "", list of "1,2"
+	CPUs []int // default nil, list of [1,2]
 	// Needed for device passthru
 	DeviceTree string // default ""; sets device_tree
 	// Example: device_tree="guest-gpio.dtb"
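A minimal before/after sketch of what this type change means for code that populates VmConfig (surrounding code elided):

```go
// Before: CPUs was a pre-formatted cpumask string.
status.VmConfig.CPUs = "1,2"

// After: CPUs is a list of CPU indices; string formatting now happens
// only at the cgroups/qemu/xen boundaries.
status.VmConfig.CPUs = []int{1, 2}
```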

This file was deleted.
