diff --git a/internal/runtimehandlerhooks/high_performance_hooks.go b/internal/runtimehandlerhooks/high_performance_hooks.go index afb013dcd7e..e725415c322 100644 --- a/internal/runtimehandlerhooks/high_performance_hooks.go +++ b/internal/runtimehandlerhooks/high_performance_hooks.go @@ -5,15 +5,19 @@ import ( "fmt" "io/ioutil" "os" + "os/exec" "path/filepath" "strconv" "strings" - "github.com/cri-o/cri-o/internal/log" - + "github.com/cri-o/cri-o/internal/config/cgmgr" "github.com/cri-o/cri-o/internal/lib/sandbox" + "github.com/cri-o/cri-o/internal/log" "github.com/cri-o/cri-o/internal/oci" - + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/systemd" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/fields" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) @@ -25,7 +29,12 @@ const ( const ( annotationCPULoadBalancing = "cpu-load-balancing.crio.io" + annotationCPUQuota = "cpu-quota.crio.io" + annotationIRQLoadBalancing = "irq-load-balancing.crio.io" + annotationTrue = "true" schedDomainDir = "/proc/sys/kernel/sched_domain" + irqSmpAffinityProcFile = "/proc/irq/default_smp_affinity" + cgroupMountPoint = "/sys/fs/cgroup" ) // HighPerformanceHooks used to run additional hooks that will configure a system for the latency sensitive workloads @@ -33,11 +42,44 @@ type HighPerformanceHooks struct{} func (h *HighPerformanceHooks) PreStart(ctx context.Context, c *oci.Container, s *sandbox.Sandbox) error { log.Infof(ctx, "Run %q runtime handler pre-start hook for the container %q", HighPerformance, c.ID()) + + if isCgroupParentBurstable(s) { + log.Infof(ctx, "Container %q is a burstable pod. Skip PreStart.", c.ID()) + return nil + } + if isCgroupParentBestEffort(s) { + log.Infof(ctx, "Container %q is a besteffort pod. Skip PreStart.", c.ID()) + return nil + } + if !isContainerRequestWholeCPU(c) { + log.Infof(ctx, "Container %q requests partial cpu(s). 
Skip PreStart", c.ID()) + return nil + } + // disable the CPU load balancing for the container CPUs if shouldCPULoadBalancingBeDisabled(s.Annotations()) { + log.Infof(ctx, "Disable cpu load balancing for container %q", c.ID()) if err := setCPUSLoadBalancing(c, false, schedDomainDir); err != nil { + return errors.Wrap(err, "set CPU load balancing") + } + } + // disable the IRQ smp load balancing for the container CPUs + if shouldIRQLoadBalancingBeDisabled(s.Annotations()) { + log.Infof(ctx, "Disable irq smp balancing for container %q", c.ID()) + if err := setIRQLoadBalancing(c, false, irqSmpAffinityProcFile); err != nil { + return errors.Wrap(err, "set IRQ load balancing") + } + } + // disable the CFS quota for the container CPUs + if shouldCPUQuotaBeDisabled(s.Annotations()) { + log.Infof(ctx, "Disable cpu cfs quota for container %q", c.ID()) + cpuMountPoint, err := cgroups.FindCgroupMountpoint(cgroupMountPoint, "cpu") + if err != nil { return err } + if err := setCPUQuota(cpuMountPoint, s.CgroupParent(), c, false); err != nil { + return errors.Wrap(err, "set CPU CFS quota") + } } return nil @@ -45,18 +87,59 @@ func (h *HighPerformanceHooks) PreStart(ctx context.Context, c *oci.Container, s func (h *HighPerformanceHooks) PreStop(ctx context.Context, c *oci.Container, s *sandbox.Sandbox) error { log.Infof(ctx, "Run %q runtime handler pre-stop hook for the container %q", HighPerformance, c.ID()) + + if isCgroupParentBurstable(s) { + log.Infof(ctx, "Container %q is a burstable pod. Skip PreStop.", c.ID()) + return nil + } + if isCgroupParentBestEffort(s) { + log.Infof(ctx, "Container %q is a besteffort pod. Skip PreStop.", c.ID()) + return nil + } + if !isContainerRequestWholeCPU(c) { + log.Infof(ctx, "Container %q requests partial cpu(s). 
Skip PreStop", c.ID()) + return nil + } + // enable the CPU load balancing for the container CPUs if shouldCPULoadBalancingBeDisabled(s.Annotations()) { if err := setCPUSLoadBalancing(c, true, schedDomainDir); err != nil { - return err + return errors.Wrap(err, "set CPU load balancing") } } + // enable the IRQ smp balancing for the container CPUs + if shouldIRQLoadBalancingBeDisabled(s.Annotations()) { + if err := setIRQLoadBalancing(c, true, irqSmpAffinityProcFile); err != nil { + return errors.Wrap(err, "set IRQ load balancing") + } + } + // no need to reverse the cgroup CPU CFS quota setting as the pod cgroup will be deleted anyway return nil } func shouldCPULoadBalancingBeDisabled(annotations fields.Set) bool { - return annotations[annotationCPULoadBalancing] == "true" + return annotations[annotationCPULoadBalancing] == annotationTrue +} + +func shouldCPUQuotaBeDisabled(annotations fields.Set) bool { + return annotations[annotationCPUQuota] == annotationTrue +} + +func shouldIRQLoadBalancingBeDisabled(annotations fields.Set) bool { + return annotations[annotationIRQLoadBalancing] == annotationTrue +} + +func isCgroupParentBurstable(s *sandbox.Sandbox) bool { + return strings.Contains(s.CgroupParent(), "burstable") +} + +func isCgroupParentBestEffort(s *sandbox.Sandbox) bool { + return strings.Contains(s.CgroupParent(), "besteffort") +} + +func isContainerRequestWholeCPU(c *oci.Container) bool { + return c.Spec().Linux != nil && c.Spec().Linux.Resources != nil && c.Spec().Linux.Resources.CPU != nil && c.Spec().Linux.Resources.CPU.Shares != nil && *(c.Spec().Linux.Resources.CPU.Shares)%1024 == 0 } func setCPUSLoadBalancing(c *oci.Container, enable bool, schedDomainDir string) error { @@ -65,7 +148,7 @@ func setCPUSLoadBalancing(c *oci.Container, enable bool, schedDomainDir string) lspec.Resources == nil || lspec.Resources.CPU == nil || lspec.Resources.CPU.Cpus == "" { - return fmt.Errorf("failed to find the container %q CPUs", c.ID()) + return errors.Errorf("find container %s CPUs", c.ID()) } cpus, err := cpuset.Parse(lspec.Resources.CPU.Cpus) @@ -113,3 +196,96 @@ func setCPUSLoadBalancing(c 
*oci.Container, enable bool, schedDomainDir string) return nil } + +func setIRQLoadBalancing(c *oci.Container, enable bool, irqSmpAffinityFile string) error { + lspec := c.Spec().Linux + if lspec == nil || + lspec.Resources == nil || + lspec.Resources.CPU == nil || + lspec.Resources.CPU.Cpus == "" { + return errors.Errorf("find container %s CPUs", c.ID()) + } + + content, err := ioutil.ReadFile(irqSmpAffinityFile) + if err != nil { + return err + } + currentIRQSMPSetting := strings.TrimSpace(string(content)) + newIRQSMPSetting, newIRQBalanceSetting, err := UpdateIRQSmpAffinityMask(lspec.Resources.CPU.Cpus, currentIRQSMPSetting, enable) + if err != nil { + return err + } + if err := ioutil.WriteFile(irqSmpAffinityFile, []byte(newIRQSMPSetting), 0o644); err != nil { + return err + } + if _, err := exec.LookPath("irqbalance"); err != nil { + // irqbalance is not installed, skip the rest; pod should still start, so return nil instead + logrus.Warnf("irqbalance binary not found: %v", err) + return nil + } + // run irqbalance in daemon mode, so this won't cause delay + cmd := exec.Command("irqbalance", "--oneshot") + additionalEnv := "IRQBALANCE_BANNED_CPUS=" + newIRQBalanceSetting + cmd.Env = append(os.Environ(), additionalEnv) + return cmd.Run() +} + +func setCPUQuota(cpuMountPoint, parentDir string, c *oci.Container, enable bool) error { + var rpath string + var err error + var cfsQuotaPath string + var parentCfsQuotaPath string + var cgroupManager cgmgr.CgroupManager + + if strings.HasSuffix(parentDir, ".slice") { + // systemd fs + if cgroupManager, err = cgmgr.SetCgroupManager("systemd"); err != nil { + return err + } + parentPath, err := systemd.ExpandSlice(parentDir) + if err != nil { + return err + } + parentCfsQuotaPath = filepath.Join(cpuMountPoint, parentPath, "cpu.cfs_quota_us") + if rpath, err = cgroupManager.ContainerCgroupAbsolutePath(parentDir, c.ID()); err != nil { + return err + } + cfsQuotaPath = filepath.Join(cpuMountPoint, rpath, "cpu.cfs_quota_us") 
+ } else { + // cgroupfs + if cgroupManager, err = cgmgr.SetCgroupManager("cgroupfs"); err != nil { + return err + } + parentCfsQuotaPath = filepath.Join(cpuMountPoint, parentDir, "cpu.cfs_quota_us") + if rpath, err = cgroupManager.ContainerCgroupAbsolutePath(parentDir, c.ID()); err != nil { + return err + } + cfsQuotaPath = filepath.Join(cpuMountPoint, rpath, "cpu.cfs_quota_us") + } + + if _, err := os.Stat(cfsQuotaPath); err != nil { + return err + } + if _, err := os.Stat(parentCfsQuotaPath); err != nil { + return err + } + + if enable { + // there should be no use case to get here, as the pod cgroup will be deleted when the pod ends + if err := ioutil.WriteFile(cfsQuotaPath, []byte("0"), 0o644); err != nil { + return err + } + if err := ioutil.WriteFile(parentCfsQuotaPath, []byte("0"), 0o644); err != nil { + return err + } + } else { + if err := ioutil.WriteFile(cfsQuotaPath, []byte("-1"), 0o644); err != nil { + return err + } + if err := ioutil.WriteFile(parentCfsQuotaPath, []byte("-1"), 0o644); err != nil { + return err + } + } + + return nil +} diff --git a/internal/runtimehandlerhooks/high_performance_hooks_test.go b/internal/runtimehandlerhooks/high_performance_hooks_test.go index d55347adaf9..497a74ba389 100644 --- a/internal/runtimehandlerhooks/high_performance_hooks_test.go +++ b/internal/runtimehandlerhooks/high_performance_hooks_test.go @@ -21,79 +21,202 @@ const ( ) // The actual test suite -var _ = Describe("setCPUSLoadBalancing", func() { - var container *oci.Container +var _ = Describe("high_performance_hooks", func() { + container, err := oci.NewContainer("containerID", "", "", "", + make(map[string]string), make(map[string]string), + make(map[string]string), "pauseImage", "", "", + &pb.ContainerMetadata{}, "sandboxID", false, false, + false, "", "", time.Now(), "") + Expect(err).To(BeNil()) + var flags string - verifySetCPULoadBalancing := func(enabled bool, expected string) { - err := setCPUSLoadBalancing(container, enabled, fixturesDir) + 
BeforeEach(func() { + err := os.MkdirAll(fixturesDir, os.ModePerm) Expect(err).To(BeNil()) + }) - for _, cpu := range []string{"cpu0", "cpu1"} { - content, err := ioutil.ReadFile(filepath.Join(fixturesDir, cpu, "domain0", "flags")) + AfterEach(func() { + err := os.RemoveAll(fixturesDir) + Expect(err).To(BeNil()) + }) + + Describe("setCPUSLoadBalancing", func() { + verifySetCPULoadBalancing := func(enabled bool, expected string) { + err := setCPUSLoadBalancing(container, enabled, fixturesDir) Expect(err).To(BeNil()) - Expect(strings.Trim(string(content), "\n")).To(Equal(expected)) + for _, cpu := range []string{"cpu0", "cpu1"} { + content, err := ioutil.ReadFile(filepath.Join(fixturesDir, cpu, "domain0", "flags")) + Expect(err).To(BeNil()) + + Expect(strings.Trim(string(content), "\n")).To(Equal(expected)) + } } - } - - JustBeforeEach(func() { - var err error - container, err = oci.NewContainer("containerID", "", "", "", - make(map[string]string), make(map[string]string), - make(map[string]string), "pauseImage", "", "", - &pb.ContainerMetadata{}, "sandboxID", false, false, - false, "", "", time.Now(), "") - Expect(err).To(BeNil()) - // set container CPUs - container.SetSpec( - &specs.Spec{ - Linux: &specs.Linux{ - Resources: &specs.LinuxResources{ - CPU: &specs.LinuxCPU{ - Cpus: "0,1", + JustBeforeEach(func() { + // set container CPUs + container.SetSpec( + &specs.Spec{ + Linux: &specs.Linux{ + Resources: &specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Cpus: "0,1", + }, }, }, }, - }, - ) + ) + + // create tests flags files + for _, cpu := range []string{"cpu0", "cpu1"} { + flagsDir := filepath.Join(fixturesDir, cpu, "domain0") + err = os.MkdirAll(flagsDir, os.ModePerm) + Expect(err).To(BeNil()) - // create tests flags files - for _, cpu := range []string{"cpu0", "cpu1"} { - flagsDir := filepath.Join(fixturesDir, cpu, "domain0") - err = os.MkdirAll(flagsDir, os.ModePerm) + err = ioutil.WriteFile(filepath.Join(flagsDir, "flags"), []byte(flags), 0o644) + 
Expect(err).To(BeNil()) + } + }) + + AfterEach(func() { + for _, cpu := range []string{"cpu0", "cpu1"} { + if err := os.RemoveAll(filepath.Join(fixturesDir, cpu)); err != nil { + log.Errorf(context.TODO(), "failed to remove temporary test files: %v", err) + } + } + }) + + Context("with enabled equals to true", func() { + BeforeEach(func() { + flags = "4142" + }) + + It("should enable the CPU load balancing", func() { + verifySetCPULoadBalancing(true, "4143") + }) + }) + + Context("with enabled equals to false", func() { + BeforeEach(func() { + flags = "4143" + }) + + It("should disable the CPU load balancing", func() { + verifySetCPULoadBalancing(false, "4142") + }) + }) + }) + + Describe("setIRQLoadBalancing", func() { + irqSmpAffinityFile := filepath.Join(fixturesDir, "irq_smp_affinity") + verifySetIRQLoadBalancing := func(enabled bool, expected string) { + err := setIRQLoadBalancing(container, enabled, irqSmpAffinityFile) Expect(err).To(BeNil()) - err = ioutil.WriteFile(filepath.Join(flagsDir, "flags"), []byte(flags), 0o644) + content, err := ioutil.ReadFile(irqSmpAffinityFile) Expect(err).To(BeNil()) + + Expect(strings.Trim(string(content), "\n")).To(Equal(expected)) } + + JustBeforeEach(func() { + // set container CPUs + container.SetSpec( + &specs.Spec{ + Linux: &specs.Linux{ + Resources: &specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Cpus: "4,5", + }, + }, + }, + }, + ) + + // create tests affinity file + err = ioutil.WriteFile(irqSmpAffinityFile, []byte(flags), 0o644) + Expect(err).To(BeNil()) + }) + + Context("with enabled equals to true", func() { + BeforeEach(func() { + flags = "0000,00003003" + }) + + It("should set the irq bit mask", func() { + verifySetIRQLoadBalancing(true, "00000000,00003033") + }) + }) + + Context("with enabled equals to false", func() { + BeforeEach(func() { + flags = "00000000,00003033" + }) + + It("should clear the irq bit mask", func() { + verifySetIRQLoadBalancing(false, "00000000,00003003") + }) + }) }) - AfterEach(func() { 
- for _, cpu := range []string{"cpu0", "cpu1"} { - err := os.RemoveAll(filepath.Join(fixturesDir, cpu)) - log.Errorf(context.TODO(), "failed to remove temporary test files: %v", err) + Describe("setCPUQuota", func() { + containerID := container.ID() + parent := "parent.slice" + child := "crio" + "-" + containerID + ".scope" + childCgroup := parent + ":" + "crio" + ":" + containerID + cpuMountPoint := filepath.Join(fixturesDir, "cgroup", "cpu") + parentFolder := filepath.Join(cpuMountPoint, parent) + childFolder := filepath.Join(cpuMountPoint, parent, child) + + verifySetCPUQuota := func(enabled bool, expected string) { + err := setCPUQuota(cpuMountPoint, parent, container, enabled) + Expect(err).To(BeNil()) + + content, err := ioutil.ReadFile(filepath.Join(childFolder, "cpu.cfs_quota_us")) + Expect(err).To(BeNil()) + Expect(strings.Trim(string(content), "\n")).To(Equal(expected)) + + content, err = ioutil.ReadFile(filepath.Join(parentFolder, "cpu.cfs_quota_us")) + Expect(err).To(BeNil()) + Expect(strings.Trim(string(content), "\n")).To(Equal(expected)) } - }) - Context("with enabled equals to true", func() { BeforeEach(func() { - flags = "4142" + if err := os.MkdirAll(childFolder, os.ModePerm); err != nil { + log.Errorf(context.TODO(), "failed to create temporary cgroup folder: %v", err) + } + if err := ioutil.WriteFile(filepath.Join(parentFolder, "cpu.cfs_quota_us"), []byte("900\n"), 0o644); err != nil { + log.Errorf(context.TODO(), "failed to create cpu.cfs_quota_us cgroup file: %v", err) + } + if err := ioutil.WriteFile(filepath.Join(childFolder, "cpu.cfs_quota_us"), []byte("900\n"), 0o644); err != nil { + log.Errorf(context.TODO(), "failed to create cpu.cfs_quota_us cgroup file: %v", err) + } + container.SetSpec( + &specs.Spec{ + Linux: &specs.Linux{ + CgroupsPath: childCgroup, + }, + }, + ) }) - It("should enable the CPU load balancing", func() { - verifySetCPULoadBalancing(true, "4143") + AfterEach(func() { + if err := os.RemoveAll(parentFolder); err != nil { 
+ log.Errorf(context.TODO(), "failed to remove temporary cgroup folder: %v", err) + } }) - }) - Context("with enabled equals to false", func() { - BeforeEach(func() { - flags = "4143" + Context("with enabled equals to true", func() { + It("should set cpu.cfs_quota_us to 0", func() { + verifySetCPUQuota(true, "0") + }) }) - It("should disable the CPU load balancing", func() { - verifySetCPULoadBalancing(false, "4142") + Context("with enabled equals to false", func() { + It("should set cpu.cfs_quota_us to -1", func() { + verifySetCPUQuota(false, "-1") + }) }) }) }) diff --git a/internal/runtimehandlerhooks/runtime_handler_hooks_suite_test.go b/internal/runtimehandlerhooks/runtime_handler_hooks_suite_test.go new file mode 100644 index 00000000000..8e77d65842e --- /dev/null +++ b/internal/runtimehandlerhooks/runtime_handler_hooks_suite_test.go @@ -0,0 +1,13 @@ +package runtimehandlerhooks + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestHighPerformanceHooks(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "high_performance_hooks Suite") +} diff --git a/internal/runtimehandlerhooks/utils.go b/internal/runtimehandlerhooks/utils.go new file mode 100644 index 00000000000..7ee4f18d222 --- /dev/null +++ b/internal/runtimehandlerhooks/utils.go @@ -0,0 +1,123 @@ +package runtimehandlerhooks + +import ( + "encoding/hex" + "fmt" + "strings" + "unicode" + + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" +) + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] > unicode.MaxASCII { + return false + } + } + return true +} + +func cpuMaskByte(c int) byte { + return byte(1 << c) +} + +func mapHexCharToByte(h string) ([]byte, error) { + l := len(h) + var hexin string + if l%2 != 0 { + // expect even number of chars + hexin = "0" + h + } else { + hexin = h + } + + breversed, err := hex.DecodeString(hexin) + if err != nil { + return nil, err + } + + l = len(breversed) + var barray []byte + var rindex int + for i 
:= 0; i < l; i++ { + rindex = l - i - 1 + barray = append(barray, breversed[rindex]) + } + return barray, nil +} + +func mapByteToHexChar(b []byte) string { + var breversed []byte + var rindex int + l := len(b) + // align it to 8 byte + if l%8 != 0 { + lfill := 8 - l%8 + l += lfill + for i := 0; i < lfill; i++ { + b = append(b, byte(0)) + } + } + + for i := 0; i < l; i++ { + rindex = l - i - 1 + breversed = append(breversed, b[rindex]) + } + return hex.EncodeToString(breversed) +} + +// take a byte array and invert each byte +func invertByteArray(in []byte) (out []byte) { + for _, b := range in { + out = append(out, byte(0xff)-b) + } + return +} + +// UpdateIRQSmpAffinityMask take input cpus that need to change irq affinity mask and +// the current mask string, return an update mask string and inverted mask, with those cpus +// enabled or disable in the mask. +func UpdateIRQSmpAffinityMask(cpus, current string, set bool) (cpuMask, bannedCPUMask string, err error) { + podcpuset, err := cpuset.Parse(cpus) + if err != nil { + return cpus, "", err + } + + // only ascii string supported + if !isASCII(current) { + return cpus, "", fmt.Errorf("non ascii character detected: %s", current) + } + + // remove ","; now each element is "0-9,a-f" + s := strings.ReplaceAll(current, ",", "") + + // the index 0 corresponds to the cpu 0-7 + currentMaskArray, err := mapHexCharToByte(s) + if err != nil { + return cpus, "", err + } + invertedMaskArray := invertByteArray(currentMaskArray) + + for _, cpu := range podcpuset.ToSlice() { + if set { + // each byte represent 8 cpus + currentMaskArray[cpu/8] |= cpuMaskByte(cpu % 8) + invertedMaskArray[cpu/8] &^= cpuMaskByte(cpu % 8) + } else { + currentMaskArray[cpu/8] &^= cpuMaskByte(cpu % 8) + invertedMaskArray[cpu/8] |= cpuMaskByte(cpu % 8) + } + } + + maskString := mapByteToHexChar(currentMaskArray) + invertedMaskString := mapByteToHexChar(invertedMaskArray) + + maskStringWithComma := maskString[0:8] + invertedMaskStringWithComma := 
invertedMaskString[0:8] + for i := 8; i+8 <= len(maskString); i += 8 { + maskStringWithComma = maskStringWithComma + "," + maskString[i:i+8] + invertedMaskStringWithComma = invertedMaskStringWithComma + "," + invertedMaskString[i:i+8] + } + return maskStringWithComma, invertedMaskStringWithComma, nil +} diff --git a/internal/runtimehandlerhooks/utils_test.go b/internal/runtimehandlerhooks/utils_test.go new file mode 100644 index 00000000000..3954bc77c39 --- /dev/null +++ b/internal/runtimehandlerhooks/utils_test.go @@ -0,0 +1,50 @@ +package runtimehandlerhooks + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" +) + +var _ = Describe("Utils", func() { + Describe("UpdateIRQSmpAffinityMask", func() { + type Input struct { + cpus string + mask string + set bool + } + type Expected struct { + mask string + invMask string + } + type TestData struct { + input Input + expected Expected + } + + DescribeTable("testing cpu mask", + func(c TestData) { + mask, invMask, err := UpdateIRQSmpAffinityMask(c.input.cpus, c.input.mask, c.input.set) + Expect(err).To(BeNil()) + Expect(mask).To(Equal(c.expected.mask)) + Expect(invMask).To(Equal(c.expected.invMask)) + }, + Entry("clear a single bit that was one", TestData{ + input: Input{cpus: "0", mask: "0000,00003003", set: false}, + expected: Expected{mask: "00000000,00003002", invMask: "0000ffff,ffffcffd"}, + }), + Entry("set a single bit that was zero", TestData{ + input: Input{cpus: "4", mask: "0000,00003003", set: true}, + expected: Expected{mask: "00000000,00003013", invMask: "0000ffff,ffffcfec"}, + }), + Entry("clear a set of bits", TestData{ + input: Input{cpus: "4-13", mask: "ffff,ffffffff", set: false}, + expected: Expected{mask: "0000ffff,ffffc00f", invMask: "00000000,00003ff0"}, + }), + Entry("set a set of bits", TestData{ + input: Input{cpus: "4-13", mask: "ffff,ffffc00f", set: true}, + expected: Expected{mask: "0000ffff,ffffffff", invMask: 
"00000000,00000000"}, + }), + ) + }) +})