diff --git a/internal/runtimehandlerhooks/high_performance_hooks_linux.go b/internal/runtimehandlerhooks/high_performance_hooks_linux.go index 14a7148ed05..c4984cdf9cd 100644 --- a/internal/runtimehandlerhooks/high_performance_hooks_linux.go +++ b/internal/runtimehandlerhooks/high_performance_hooks_linux.go @@ -28,6 +28,7 @@ import ( "github.com/cri-o/cri-o/internal/log" "github.com/cri-o/cri-o/internal/oci" crioannotations "github.com/cri-o/cri-o/pkg/annotations" + "github.com/cri-o/cri-o/pkg/config" "github.com/cri-o/cri-o/utils/cmdrunner" ) @@ -68,6 +69,7 @@ type HighPerformanceHooks struct { irqSMPAffinityFileLock sync.Mutex irqBalanceConfigFileLock sync.Mutex sharedCPUs string + execCPUAffinity config.ExecCPUAffinityType irqSMPAffinityFile string } @@ -78,30 +80,78 @@ func (h *HighPerformanceHooks) PreCreate(ctx context.Context, specgen *generate. return nil } - if requestedSharedCPUs(s.Annotations(), c.CRIContainer().GetMetadata().GetName()) { - if isContainerCPUsSpecEmpty(specgen.Config) { - return fmt.Errorf("no cpus found for container %q", c.Name()) - } + var ( + exclusiveCPUSet cpuset.CPUSet + sharedCPUSet cpuset.CPUSet + err error + ) + if !isContainerCPUsSpecEmpty(specgen.Config) { cpusString := specgen.Config.Linux.Resources.CPU.Cpus - exclusiveCPUs, err := cpuset.Parse(cpusString) + exclusiveCPUSet, err = cpuset.Parse(cpusString) if err != nil { return fmt.Errorf("failed to parse container %q cpus: %w", c.Name(), err) } + } - if h.sharedCPUs == "" { - return fmt.Errorf("shared CPUs were requested for container %q but none are defined", c.Name()) + if requestedSharedCPUs(s.Annotations(), c.CRIContainer().GetMetadata().GetName()) { + if exclusiveCPUSet.IsEmpty() { + return fmt.Errorf("no cpus found for container %q", c.Name()) } - sharedCPUSet, err := cpuset.Parse(h.sharedCPUs) + sharedCPUSet, err = cpuset.Parse(h.sharedCPUs) if err != nil { return fmt.Errorf("failed to parse shared cpus: %w", err) } + + if sharedCPUSet.IsEmpty() { + return fmt.Errorf("shared CPUs were requested for container %q but none are defined", c.Name()) + } + // We must inject the environment variables in the PreCreate stage, // because in the PreStart stage the process is already constructed. // by the low-level runtime and the environment variables are already finalized. - injectCpusetEnv(specgen, &exclusiveCPUs, &sharedCPUSet) + injectCpusetEnv(specgen, &exclusiveCPUSet, &sharedCPUSet) + } + + return h.setExecCPUAffinity(ctx, specgen, &exclusiveCPUSet, &sharedCPUSet) +} + +// setExecCPUAffinity sets ExecCPUAffinity in the container spec. +func (h *HighPerformanceHooks) setExecCPUAffinity(ctx context.Context, specgen *generate.Generator, exclusiveCPUSet, sharedCPUSet *cpuset.CPUSet) error { + var execCPUSet cpuset.CPUSet + + switch h.execCPUAffinity { + case config.ExecCPUAffinityTypeFirst: + switch { + case sharedCPUSet != nil && !sharedCPUSet.IsEmpty(): + // List() is sorted, so [0] should be the least CPU. + execCPUSet = cpuset.New(sharedCPUSet.List()[0]) + case exclusiveCPUSet != nil && !exclusiveCPUSet.IsEmpty(): + execCPUSet = cpuset.New(exclusiveCPUSet.List()[0]) + default: + log.Errorf(ctx, "ExecCPUAffinityType %s is set, but no CPUSet is available. Falling back to default.", h.execCPUAffinity) + } + case config.ExecCPUAffinityTypeDefault: + // Don't set ExecCPUAffinity, which means using runtime default. + default: + // This shouldn't happen because there's config validation. 
+ return fmt.Errorf("unknown ExecCPUAffinityType %s is used", h.execCPUAffinity) + } + + log.Debugf(ctx, "Set ExecCPUAffinity to %q", execCPUSet.String()) + + if !execCPUSet.IsEmpty() { + if specgen.Config == nil { + specgen.Config = &specs.Spec{} + } + + if specgen.Config.Process == nil { + specgen.Config.Process = &specs.Process{} + } + + specgen.Config.Process.ExecCPUAffinity = &specs.CPUAffinity{Initial: execCPUSet.String()} } return nil diff --git a/internal/runtimehandlerhooks/high_performance_hooks_test.go b/internal/runtimehandlerhooks/high_performance_hooks_test.go index 9fb089f2ef1..839ecb7c91d 100644 --- a/internal/runtimehandlerhooks/high_performance_hooks_test.go +++ b/internal/runtimehandlerhooks/high_performance_hooks_test.go @@ -706,65 +706,215 @@ var _ = Describe("high_performance_hooks", func() { }) }) Describe("PreCreate Hook", func() { + baseSandboxBuilder := func() sandbox.Builder { + sbox := sandbox.NewBuilder() + createdAt := time.Now() + sbox.SetCreatedAt(createdAt) + sbox.SetID("sandboxID") + sbox.SetName("sandboxName") + sbox.SetLogDir("test") + sbox.SetShmPath("test") + sbox.SetNamespace("") + sbox.SetKubeName("") + sbox.SetMountLabel("test") + sbox.SetProcessLabel("test") + sbox.SetCgroupParent("") + sbox.SetRuntimeHandler("") + sbox.SetResolvPath("") + sbox.SetHostname("") + sbox.SetPortMappings([]*hostport.PortMapping{}) + sbox.SetHostNetwork(false) + sbox.SetUsernsMode("") + sbox.SetPodLinuxOverhead(nil) + sbox.SetPodLinuxResources(nil) + sbox.SetPrivileged(false) + sbox.SetHostNetwork(false) + sbox.SetCreatedAt(createdAt) + + return sbox + } + shares := uint64(2048) - g := &generate.Generator{ - Config: &specs.Spec{ - Process: &specs.Process{ - Env: make([]string, 0), - }, - Linux: &specs.Linux{ - Resources: &specs.LinuxResources{ - CPU: &specs.LinuxCPU{ - Cpus: "1,2", - Shares: &shares, + baseGenerator := func() *generate.Generator { + return &generate.Generator{ + Config: &specs.Spec{ + Process: &specs.Process{ + Env: make([]string, 0), + }, + Linux: &specs.Linux{ + Resources: &specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Shares: &shares, + }, }, }, }, - }, + } } - c, err := oci.NewContainer("containerID", "", "", "", - make(map[string]string), make(map[string]string), - make(map[string]string), "pauseImage", nil, nil, "", - &types.ContainerMetadata{Name: "cnt1"}, "sandboxID", false, false, - false, "", "", time.Now(), "") - Expect(err).ToNot(HaveOccurred()) - sbox := sandbox.NewBuilder() - createdAt := time.Now() - sbox.SetCreatedAt(createdAt) - sbox.SetID("sandboxID") - sbox.SetName("sandboxName") - sbox.SetLogDir("test") - sbox.SetShmPath("test") - sbox.SetNamespace("") - sbox.SetKubeName("") - sbox.SetMountLabel("test") - sbox.SetProcessLabel("test") - sbox.SetCgroupParent("") - sbox.SetRuntimeHandler("") - sbox.SetResolvPath("") - sbox.SetHostname("") - sbox.SetPortMappings([]*hostport.PortMapping{}) - sbox.SetHostNetwork(false) - sbox.SetUsernsMode("") - sbox.SetPodLinuxOverhead(nil) - sbox.SetPodLinuxResources(nil) - err = sbox.SetCRISandbox(sbox.ID(), make(map[string]string), map[string]string{ - crioannotations.CPUSharedAnnotation + "/" + c.CRIContainer().GetMetadata().GetName(): annotationEnable, - }, &types.PodSandboxMetadata{}) - Expect(err).ToNot(HaveOccurred()) - sbox.SetPrivileged(false) - sbox.SetHostNetwork(false) - sbox.SetCreatedAt(createdAt) - sb, err := sbox.GetSandbox() - Expect(err).ToNot(HaveOccurred()) + buildContainer := func(g *generate.Generator) (*oci.Container, error) { + c, err := oci.NewContainer("containerID", "", "", 
"", + make(map[string]string), make(map[string]string), + make(map[string]string), "pauseImage", nil, nil, "", + &types.ContainerMetadata{Name: "cnt1"}, "sandboxID", false, false, + false, "", "", time.Now(), "") + if err != nil { + return nil, err + } + c.SetSpec(g.Config) + + return c, nil + } + + var ( + sbSharedAnnotation *sandbox.Sandbox + sbNoSharedAnnotation *sandbox.Sandbox + genExclusiveCPUs *generate.Generator + genNoExclusiveCPUs *generate.Generator + ) + + BeforeEach(func() { + // initialize generator + genNoExclusiveCPUs = baseGenerator() - It("should inject env variable only to pod with cpu-shared.crio.io annotation", func() { - h := HighPerformanceHooks{sharedCPUs: "3,4"} - err := h.PreCreate(context.TODO(), g, sb, c) + genExclusiveCPUs = baseGenerator() + genExclusiveCPUs.Config.Linux.Resources.CPU.Cpus = "1-2" + + // initialize sandbox + sbox := baseSandboxBuilder() + err = sbox.SetCRISandbox(sbox.ID(), make(map[string]string), map[string]string{}, &types.PodSandboxMetadata{}) + sbNoSharedAnnotation, err = sbox.GetSandbox() + Expect(err).ToNot(HaveOccurred()) + + sbox = baseSandboxBuilder() + err = sbox.SetCRISandbox(sbox.ID(), make(map[string]string), map[string]string{ + crioannotations.CPUSharedAnnotation + "/cnt1": annotationEnable, + }, &types.PodSandboxMetadata{}) Expect(err).ToNot(HaveOccurred()) - env := g.Config.Process.Env - Expect(env).To(ContainElements("OPENSHIFT_ISOLATED_CPUS=1-2", "OPENSHIFT_SHARED_CPUS=3-4")) + sbSharedAnnotation, err = sbox.GetSandbox() + Expect(err).ToNot(HaveOccurred()) + }) + + var ( + g *generate.Generator + c *oci.Container + sb *sandbox.Sandbox + ) + Context("sharedCPUs && FirstExecCPUAffinity", func() { + h := HighPerformanceHooks{execCPUAffinity: config.ExecCPUAffinityTypeFirst, sharedCPUs: "3,4"} + Context("with exclusive & shared CPUs", func() { + BeforeEach(func() { + g = genExclusiveCPUs + sb = sbSharedAnnotation + c, err = buildContainer(g) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should inject env variable only to pod with cpu-shared.crio.io annotation", func() { + err = h.PreCreate(context.TODO(), g, sb, c) + Expect(err).ToNot(HaveOccurred()) + env := g.Config.Process.Env + Expect(env).To(ContainElements("OPENSHIFT_ISOLATED_CPUS=1-2", "OPENSHIFT_SHARED_CPUS=3-4")) + }) + + It("should choose the first CPU in shared CPUs", func() { + err := h.PreCreate(context.TODO(), g, sb, c) + Expect(err).ToNot(HaveOccurred()) + Expect(g.Config.Process.ExecCPUAffinity.Initial).To(Equal("3")) + }) + }) + + Context("with exclusive & !shared CPUs", func() { + BeforeEach(func() { + g = genExclusiveCPUs + sb = sbNoSharedAnnotation + c, err = buildContainer(g) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should choose the first CPU in exclusive CPUs", func() { + err := h.PreCreate(context.TODO(), g, sb, c) + Expect(err).ToNot(HaveOccurred()) + Expect(g.Config.Process.ExecCPUAffinity.Initial).To(Equal("1")) + }) + }) + + Context("with !exclusive & shared CPUs", func() { + BeforeEach(func() { + g = genNoExclusiveCPUs + sb = sbSharedAnnotation + c, err = buildContainer(g) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should get an error", func() { + err := h.PreCreate(context.TODO(), g, sb, c) + Expect(err).To(HaveOccurred()) + }) + }) + + Context("with !exclusive & !shared CPUs", func() { + BeforeEach(func() { + g = genNoExclusiveCPUs + sb = sbNoSharedAnnotation + c, err = buildContainer(g) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should not use ExecCPUAffinity", func() { + err := h.PreCreate(context.TODO(), g, sb, c) + 
Expect(err).ToNot(HaveOccurred()) + Expect(g.Config.Process.ExecCPUAffinity).To(BeNil()) + }) + }) + }) + + Context("No shared CPUs and FirstExecCPUAffinity", func() { + h := HighPerformanceHooks{execCPUAffinity: config.ExecCPUAffinityTypeFirst} + Context("with shared CPUs", func() { + BeforeEach(func() { + g = genExclusiveCPUs + sb = sbSharedAnnotation + c, err = buildContainer(g) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should get an error", func() { + err := h.PreCreate(context.TODO(), g, sb, c) + Expect(err).To(HaveOccurred()) + }) + }) + + Context("with exclusive CPUs", func() { + BeforeEach(func() { + g = genExclusiveCPUs + sb = sbNoSharedAnnotation + c, err = buildContainer(g) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should choose the first CPU in exclusive CPUs", func() { + err := h.PreCreate(context.TODO(), g, sb, c) + Expect(err).ToNot(HaveOccurred()) + Expect(g.Config.Process.ExecCPUAffinity.Initial).To(Equal("1")) + }) + }) + }) + + Context("DefaultExecCPUAffinity", func() { + h := HighPerformanceHooks{execCPUAffinity: config.ExecCPUAffinityTypeDefault, sharedCPUs: "3,4"} + BeforeEach(func() { + g = genExclusiveCPUs + sb = sbSharedAnnotation + c, err = buildContainer(g) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should not use ExecCPUAffinity", func() { + err := h.PreCreate(context.TODO(), g, sb, c) + Expect(err).ToNot(HaveOccurred()) + Expect(g.Config.Process.ExecCPUAffinity).To(BeNil()) + }) }) }) Describe("Make sure that correct runtime handler hooks are set", func() { @@ -873,8 +1023,8 @@ var _ = Describe("high_performance_hooks", func() { } }) - It("should set the correct irq bit mask with concurrency", func() { - hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + It("should set the correct irq bit mask with concurrency", func(ctx context.Context) { + hooks := hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) Expect(hooks).NotTo(BeNil()) if hph, ok := hooks.(*HighPerformanceHooks); ok { hph.irqSMPAffinityFile = irqSmpAffinityFile @@ -912,8 +1062,8 @@ var _ = Describe("high_performance_hooks", func() { } }) - It("should keep the current irq bit mask but return a high performance hooks", func() { - hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + It("should keep the current irq bit mask but return a high performance hooks", func(ctx context.Context) { + hooks := hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) Expect(hooks).NotTo(BeNil()) hph, ok := hooks.(*HighPerformanceHooks) Expect(ok).To(BeTrue()) @@ -954,8 +1104,8 @@ var _ = Describe("high_performance_hooks", func() { } }) - It("should set the correct irq bit mask with concurrency", func() { - hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + It("should set the correct irq bit mask with concurrency", func(ctx context.Context) { + hooks := hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) Expect(hooks).NotTo(BeNil()) if hph, ok := hooks.(*HighPerformanceHooks); ok { hph.irqSMPAffinityFile = irqSmpAffinityFile @@ -995,8 +1145,8 @@ var _ = Describe("high_performance_hooks", func() { } }) - It("should return a nil hook", func() { - hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + It("should return a nil hook", func(ctx context.Context) { + hooks := hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) Expect(hooks).To(BeNil()) }) }) @@ -1018,8 +1168,8 @@ var _ = Describe("high_performance_hooks", func() { } }) - It("should set the correct irq bit mask with concurrency", func() 
{ - hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + It("should set the correct irq bit mask with concurrency", func(ctx context.Context) { + hooks := hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) Expect(hooks).NotTo(BeNil()) if hph, ok := hooks.(*HighPerformanceHooks); ok { hph.irqSMPAffinityFile = irqSmpAffinityFile @@ -1067,8 +1217,8 @@ var _ = Describe("high_performance_hooks", func() { } }) - It("should yield a DefaultCPULoadBalanceHooks which keeps the old mask", func() { - hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + It("should yield a DefaultCPULoadBalanceHooks which keeps the old mask", func(ctx context.Context) { + hooks := hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) Expect(hooks).NotTo(BeNil()) _, ok := (hooks).(*DefaultCPULoadBalanceHooks) Expect(ok).To(BeTrue()) diff --git a/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go b/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go index facd0347858..1f545cc726a 100644 --- a/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go +++ b/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go @@ -40,8 +40,16 @@ func NewHooksRetriever(ctx context.Context, config *libconfig.Config) *HooksRetr // the single instance of highPerformanceHooks. // Otherwise, if crio's config allows CPU load balancing anywhere, return a DefaultCPULoadBalanceHooks. // Otherwise, return nil. -func (hr *HooksRetriever) Get(runtimeName string, sandboxAnnotations map[string]string) RuntimeHandlerHooks { +func (hr *HooksRetriever) Get(ctx context.Context, runtimeName string, sandboxAnnotations map[string]string) RuntimeHandlerHooks { if strings.Contains(runtimeName, HighPerformance) || highPerformanceAnnotationsSpecified(sandboxAnnotations) { + runtimeConfig, ok := hr.config.Runtimes[runtimeName] + if !ok { + // This shouldn't happen because runtime is already validated + log.Errorf(ctx, "Config of runtime %s is not found", runtimeName) + + return nil + } + if hr.highPerformanceHooks == nil { hr.highPerformanceHooks = &HighPerformanceHooks{ irqBalanceConfigFile: hr.config.IrqBalanceConfigFile, @@ -50,6 +58,7 @@ func (hr *HooksRetriever) Get(runtimeName string, sandboxAnnotations map[string] irqBalanceConfigFileLock: sync.Mutex{}, sharedCPUs: hr.config.SharedCPUSet, irqSMPAffinityFile: IrqSmpAffinityProcFile, + execCPUAffinity: runtimeConfig.ExecCPUAffinity, } } diff --git a/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go b/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go index 4893408b15a..dce7b6c8f4d 100644 --- a/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go +++ b/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go @@ -27,7 +27,7 @@ func NewHooksRetriever(ctx context.Context, config *libconfig.Config) *HooksRetr } // Get always returns DefaultCPULoadBalanceHooks for non-linux architectures. -func (hr *HooksRetriever) Get(runtimeName string, sandboxAnnotations map[string]string) RuntimeHandlerHooks { +func (hr *HooksRetriever) Get(ctx context.Context, runtimeName string, sandboxAnnotations map[string]string) RuntimeHandlerHooks { return &DefaultCPULoadBalanceHooks{} } diff --git a/pkg/config/config.go b/pkg/config/config.go index b8937ffd143..168eda9210f 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -271,8 +271,24 @@ type RuntimeHandler struct { // Default annotations specified for runtime handler if they're not overridden by // the pod spec. 
DefaultAnnotations map[string]string `toml:"default_annotations,omitempty"` + + // ExecCPUAffinity specifies which CPU is used when exec-ing the container. + // The valid values are: + // "": + // Use runtime default. + // "first": + // When it has only exclusive cpuset, use the first CPU in the exclusive cpuset. + // When it has both shared and exclusive cpusets, use first CPU in the shared cpuset. + ExecCPUAffinity ExecCPUAffinityType `toml:"exec_cpu_affinity,omitempty"` } +type ExecCPUAffinityType string + +const ( + ExecCPUAffinityTypeDefault ExecCPUAffinityType = "" + ExecCPUAffinityTypeFirst ExecCPUAffinityType = "first" +) + // Multiple runtime Handlers in a map. type Runtimes map[string]*RuntimeHandler @@ -1366,6 +1382,7 @@ func defaultRuntimeHandler(isSystemd bool) *RuntimeHandler { }, ContainerMinMemory: units.BytesSize(defaultContainerMinMemoryCrun), MonitorCgroup: getDefaultMonitorGroup(isSystemd), + ExecCPUAffinity: ExecCPUAffinityTypeDefault, } } @@ -1725,7 +1742,15 @@ func (r *RuntimeHandler) Validate(name string) error { logrus.Errorf("Unable to set minimum container memory for runtime handler %q: %v", name, err) } - return r.ValidateNoSyncLog() + if err := r.ValidateNoSyncLog(); err != nil { + return fmt.Errorf("no sync log: %w", err) + } + + if err := r.validateRuntimeExecCPUAffinity(); err != nil { + return err + } + + return nil } func (r *RuntimeHandler) ValidateRuntimeVMBinaryPattern() bool { @@ -1903,6 +1928,16 @@ func (r *RuntimeHandler) RuntimeDefaultAnnotations() map[string]string { return r.DefaultAnnotations } +// validateRuntimeExecCPUAffinity checks if the RuntimeHandler enforces proper CPU affinity settings. +func (r *RuntimeHandler) validateRuntimeExecCPUAffinity() error { + switch r.ExecCPUAffinity { + case ExecCPUAffinityTypeDefault, ExecCPUAffinityTypeFirst: + return nil + } + + return fmt.Errorf("invalid exec_cpu_affinity %q", r.ExecCPUAffinity) +} + func validateAllowedAndGenerateDisallowedAnnotations(allowed []string) (disallowed []string, _ error) { disallowedMap := make(map[string]bool) for _, ann := range annotations.AllAllowedAnnotations { diff --git a/server/container_create.go b/server/container_create.go index 2b6058456bd..af3e37b8dbf 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -1318,7 +1318,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, ctr container.Conta makeOCIConfigurationRootless(specgen) } - hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + hooks := s.hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) if err := s.nri.createContainer(ctx, specgen, sb, ociContainer); err != nil { return nil, err diff --git a/server/container_start.go b/server/container_start.go index 79964319e76..a2829e6eb99 100644 --- a/server/container_start.go +++ b/server/container_start.go @@ -69,7 +69,7 @@ func (s *Server) StartContainer(ctx context.Context, req *types.StartContainerRe sandbox := s.getSandbox(ctx, c.Sandbox()) - hooks := s.hooksRetriever.Get(sandbox.RuntimeHandler(), sandbox.Annotations()) + hooks := s.hooksRetriever.Get(ctx, sandbox.RuntimeHandler(), sandbox.Annotations()) if err := s.nri.startContainer(ctx, sandbox, c); err != nil { log.Warnf(ctx, "NRI start failed for container %q: %v", c.ID(), err) diff --git a/server/container_stop.go b/server/container_stop.go index 35a74d13571..fb15645aacb 100644 --- a/server/container_stop.go +++ b/server/container_stop.go @@ -48,7 +48,7 @@ func (s *Server) stopContainer(ctx context.Context, ctr *oci.Container, timeout sb := 
s.getSandbox(ctx, ctr.Sandbox()) - hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + hooks := s.hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()) if hooks != nil { if err := hooks.PreStop(ctx, ctr, sb); err != nil { return fmt.Errorf("failed to run pre-stop hook for container %q: %w", ctr.ID(), err) diff --git a/server/sandbox_run_linux.go b/server/sandbox_run_linux.go index 6f9a170a5c8..b653b288ac5 100644 --- a/server/sandbox_run_linux.go +++ b/server/sandbox_run_linux.go @@ -1173,7 +1173,7 @@ func (s *Server) runPodSandbox(ctx context.Context, req *types.RunPodSandboxRequ return nil, err } - if hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()); hooks != nil { + if hooks := s.hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()); hooks != nil { if err := hooks.PreStart(ctx, container, sb); err != nil { return nil, fmt.Errorf("failed to run pre-stop hook for container %q: %w", sb.ID(), err) } diff --git a/server/server.go b/server/server.go index 5ef25a292bf..ca20dcf6803 100644 --- a/server/server.go +++ b/server/server.go @@ -888,7 +888,7 @@ func (s *Server) handleExit(ctx context.Context, event fsnotify.Event) { } } - if hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()); hooks != nil { + if hooks := s.hooksRetriever.Get(ctx, sb.RuntimeHandler(), sb.Annotations()); hooks != nil { if err := hooks.PostStop(ctx, c, sb); err != nil { log.Errorf(ctx, "Failed to run post-stop hook for container %s: %v", c.ID(), err) } diff --git a/test/exec_cpu_affinity.bats b/test/exec_cpu_affinity.bats new file mode 100644 index 00000000000..7f467e5e194 --- /dev/null +++ b/test/exec_cpu_affinity.bats @@ -0,0 +1,76 @@ +#!/usr/bin/env bats +# vim: set syntax=sh: + +load helpers + +function setup() { + if ! 
command -v crun; then + skip "this test is supposed to run with crun" + fi + setup_test + +} + +function teardown() { + cleanup_test +} + +@test "should not specify the exec cpu affinity" { + skip_if_vm_runtime + cat << EOF > "$CRIO_CONFIG_DIR/01-workload.conf" +[crio.runtime.runtimes.high-performance] +runtime_path="$RUNTIME_BINARY_PATH" +EOF + start_crio + + ctr_id=$(crictl run --runtime high-performance "$TESTDATA/container_sleep.json" "$TESTDATA/sandbox_config.json") + + output=$(crictl inspect "$ctr_id" | jq -r .info.runtimeSpec.process.execCPUAffinity) + [ "$output" = "null" ] +} + +@test "should specify the exec cpu affinity when the container only uses exclusive cpus" { + skip_if_vm_runtime + cat << EOF > "$CRIO_CONFIG_DIR/01-workload.conf" +[crio.runtime.runtimes.high-performance] +runtime_path="$RUNTIME_BINARY_PATH" +exec_cpu_affinity = "first" +EOF + start_crio + jq ' + .linux.resources.cpu_shares = 2048 | + .linux.resources.cpuset_cpus = "0-1" + ' \ + "$TESTDATA/container_sleep.json" > "$TESTDIR/container_config.json" + ctr_id=$(crictl run --runtime high-performance "$TESTDIR/container_config.json" "$TESTDATA/sandbox_config.json") + + output=$(crictl inspect "$ctr_id" | jq -r .info.runtimeSpec.process.execCPUAffinity.initial) + echo "$output" + [ "$output" = "0" ] +} + +@test "should specify shared cpu as the exec cpu affinity when the container uses both exclusive cpus and shared cpus" { + skip_if_vm_runtime + cat << EOF > "$CRIO_CONFIG_DIR/01-workload.conf" +[crio.runtime] +shared_cpuset = "2-3" +[crio.runtime.runtimes.high-performance] +runtime_path="$RUNTIME_BINARY_PATH" +exec_cpu_affinity = "first" +allowed_annotations = ["cpu-shared.crio.io"] +EOF + start_crio + jq ' + .linux.resources.cpu_shares = 2048 | + .linux.resources.cpuset_cpus = "0-1" + ' "$TESTDATA/container_sleep.json" > "$TESTDIR/container_config.json" + jq ' + .annotations."cpu-shared.crio.io/podsandbox-sleep" = "enable" + ' "$TESTDATA/sandbox_config.json" > "$TESTDIR/sandbox_config.json" + cat "$TESTDIR/sandbox_config.json" + ctr_id=$(crictl run --runtime high-performance "$TESTDIR/container_config.json" "$TESTDIR/sandbox_config.json") + + output=$(crictl inspect "$ctr_id" | jq -r .info.runtimeSpec.process.execCPUAffinity.initial) + echo "$output" + [ "$output" = "2" ] +}
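
Note (not part of the patch): a minimal, standalone sketch of the "first" selection policy implemented by setExecCPUAffinity above, using k8s.io/utils/cpuset just as the hook code does. Shared CPUs take precedence over exclusive CPUs, and an empty result means the runtime default applies. The CPU values mirror the bats tests (exclusive "0-1", shared "2-3"); the helper name firstExecCPU is illustrative only.

    package main

    import (
        "fmt"

        "k8s.io/utils/cpuset"
    )

    // firstExecCPU mirrors the "first" policy: prefer the lowest shared CPU,
    // fall back to the lowest exclusive CPU, otherwise return an empty set so
    // the runtime default is used.
    func firstExecCPU(exclusive, shared cpuset.CPUSet) cpuset.CPUSet {
        switch {
        case !shared.IsEmpty():
            // List() returns CPUs in ascending order, so index 0 is the lowest CPU.
            return cpuset.New(shared.List()[0])
        case !exclusive.IsEmpty():
            return cpuset.New(exclusive.List()[0])
        default:
            return cpuset.New()
        }
    }

    func main() {
        exclusive, _ := cpuset.Parse("0-1")
        shared, _ := cpuset.Parse("2-3")

        fmt.Println(firstExecCPU(exclusive, shared))                    // "2": shared wins, as in the last bats test
        fmt.Println(firstExecCPU(exclusive, cpuset.New()))              // "0": exclusive only, as in the second bats test
        fmt.Println(firstExecCPU(cpuset.New(), cpuset.New()).IsEmpty()) // true: nothing set, runtime default applies
    }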
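
A related note on semantics, also not part of the patch: per the OCI runtime spec, process.execCPUAffinity.initial is the CPU list an exec'd process is placed on before it transitions into the container's cgroup, so probes and crictl exec sessions start on the designated CPU rather than on one of the exclusive CPUs. A hypothetical way to observe this from inside an exec session, assuming golang.org/x/sys/unix (which this change does not use):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Read the affinity mask of the calling process (pid 0 means "self").
        var set unix.CPUSet
        if err := unix.SchedGetaffinity(0, &set); err != nil {
            panic(err)
        }

        var cpus []int
        for i := 0; i < 256; i++ { // scan a reasonable range of CPU ids
            if set.IsSet(i) {
                cpus = append(cpus, i)
            }
        }

        // With shared_cpuset = "2-3" and exec_cpu_affinity = "first", an exec'd
        // process would be expected to report [2] here initially.
        fmt.Println(cpus)
    }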