diff --git a/internal/lib/container_server.go b/internal/lib/container_server.go
index fe607fc33d7..caecc1c0f84 100644
--- a/internal/lib/container_server.go
+++ b/internal/lib/container_server.go
@@ -136,23 +136,23 @@ func New(ctx context.Context, configIface libconfig.Iface) (*ContainerServer, er
 }
 
 // LoadSandbox loads a sandbox from the disk into the sandbox store
-func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr error) {
+func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (sb *sandbox.Sandbox, retErr error) {
 	config, err := c.store.FromContainerDirectory(id, "config.json")
 	if err != nil {
-		return err
+		return nil, err
 	}
 	var m rspec.Spec
 	if err := json.Unmarshal(config, &m); err != nil {
-		return errors.Wrap(err, "error unmarshalling sandbox spec")
+		return nil, errors.Wrap(err, "error unmarshalling sandbox spec")
 	}
 	labels := make(map[string]string)
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.Labels]), &labels); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Labels)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Labels)
 	}
 	name := m.Annotations[annotations.Name]
 	name, err = c.ReservePodName(id, name)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -161,7 +161,7 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 	}()
 	var metadata sandbox.Metadata
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.Metadata]), &metadata); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Metadata)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Metadata)
 	}
 
 	processLabel := m.Process.SelinuxLabel
@@ -171,29 +171,29 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 	kubeAnnotations := make(map[string]string)
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Annotations)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Annotations)
 	}
 
 	portMappings := []*hostport.PortMapping{}
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.PortMappings]), &portMappings); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.PortMappings)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.PortMappings)
 	}
 
 	privileged := isTrue(m.Annotations[annotations.PrivilegedRuntime])
 	hostNetwork := isTrue(m.Annotations[annotations.HostNetwork])
 
 	nsOpts := types.NamespaceOption{}
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.NamespaceOptions]), &nsOpts); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.NamespaceOptions)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.NamespaceOptions)
 	}
 
 	created, err := time.Parse(time.RFC3339Nano, m.Annotations[annotations.Created])
 	if err != nil {
-		return errors.Wrap(err, "parsing created timestamp annotation")
+		return nil, errors.Wrap(err, "parsing created timestamp annotation")
 	}
 
-	sb, err := sandbox.New(id, m.Annotations[annotations.Namespace], name, m.Annotations[annotations.KubeName], filepath.Dir(m.Annotations[annotations.LogPath]), labels, kubeAnnotations, processLabel, mountLabel, &metadata, m.Annotations[annotations.ShmPath], m.Annotations[annotations.CgroupParent], privileged, m.Annotations[annotations.RuntimeHandler], m.Annotations[annotations.ResolvPath], m.Annotations[annotations.HostName], portMappings, hostNetwork, created, m.Annotations[crioann.UsernsModeAnnotation])
+	sb, err = sandbox.New(id, m.Annotations[annotations.Namespace], name, m.Annotations[annotations.KubeName], filepath.Dir(m.Annotations[annotations.LogPath]), labels, kubeAnnotations, processLabel, mountLabel, &metadata, m.Annotations[annotations.ShmPath], m.Annotations[annotations.CgroupParent], privileged, m.Annotations[annotations.RuntimeHandler], m.Annotations[annotations.ResolvPath], m.Annotations[annotations.HostName], portMappings, hostNetwork, created, m.Annotations[crioann.UsernsModeAnnotation])
 	if err != nil {
-		return err
+		return nil, err
 	}
 	sb.AddHostnamePath(m.Annotations[annotations.HostnamePath])
 	sb.SetSeccompProfilePath(spp)
@@ -221,13 +221,13 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 		path, err := configNsPath(&m, namespaceToJoin.rspecNS)
 		if err == nil {
 			if nsErr := namespaceToJoin.joinFunc(path); nsErr != nil {
-				return nsErr
+				return sb, nsErr
 			}
 		}
 	}
 
 	if err := c.AddSandbox(sb); err != nil {
-		return err
+		return sb, err
 	}
 
 	defer func() {
@@ -240,19 +240,19 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 
 	sandboxPath, err := c.store.ContainerRunDirectory(id)
 	if err != nil {
-		return err
+		return sb, err
 	}
 
 	sandboxDir, err := c.store.ContainerDirectory(id)
 	if err != nil {
-		return err
+		return sb, err
 	}
 
 	cID := m.Annotations[annotations.ContainerID]
 
 	cname, err := c.ReserveContainerName(cID, m.Annotations[annotations.ContainerName])
 	if err != nil {
-		return err
+		return sb, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -272,7 +272,7 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 	if !wasSpoofed {
 		scontainer, err = oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], labels, m.Annotations, kubeAnnotations, m.Annotations[annotations.Image], "", "", nil, id, false, false, false, sb.RuntimeHandler(), sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
 		if err != nil {
-			return err
+			return sb, err
 		}
 		scontainer.SetSpec(&m)
 		scontainer.SetMountPoint(m.Annotations[annotations.MountPoint])
@@ -280,7 +280,7 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 		if m.Annotations[annotations.Volumes] != "" {
 			containerVolumes := []oci.ContainerVolume{}
 			if err = json.Unmarshal([]byte(m.Annotations[annotations.Volumes]), &containerVolumes); err != nil {
-				return fmt.Errorf("failed to unmarshal container volumes: %v", err)
+				return sb, fmt.Errorf("failed to unmarshal container volumes: %v", err)
 			}
 			for _, cv := range containerVolumes {
 				scontainer.AddVolume(cv)
@@ -291,28 +291,28 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 	}
 
 	if err := c.ContainerStateFromDisk(ctx, scontainer); err != nil {
-		return fmt.Errorf("error reading sandbox state from disk %q: %v", scontainer.ID(), err)
+		return sb, fmt.Errorf("error reading sandbox state from disk %q: %v", scontainer.ID(), err)
 	}
 
 	// We write back the state because it is possible that crio did not have a chance to
 	// read the exit file and persist exit code into the state on reboot.
 	if err := c.ContainerStateToDisk(ctx, scontainer); err != nil {
-		return fmt.Errorf("failed to write container %q state to disk: %v", scontainer.ID(), err)
+		return sb, fmt.Errorf("failed to write container %q state to disk: %v", scontainer.ID(), err)
 	}
 
 	if err := sb.SetInfraContainer(scontainer); err != nil {
-		return err
+		return sb, err
 	}
 	sb.SetCreated()
 
 	if err := label.ReserveLabel(processLabel); err != nil {
-		return err
+		return sb, err
 	}
 
 	sb.RestoreStopped()
 
 	if err := c.ctrIDIndex.Add(scontainer.ID()); err != nil {
-		return err
+		return sb, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -322,9 +322,9 @@ func (c *ContainerServer) LoadSandbox(ctx context.Context, id string) (retErr er
 		}
 	}()
 	if err := c.podIDIndex.Add(id); err != nil {
-		return err
+		return sb, err
 	}
-	return nil
+	return sb, nil
 }
 
 func configNsPath(spec *rspec.Spec, nsType rspec.LinuxNamespaceType) (string, error) {
diff --git a/internal/lib/container_server_test.go b/internal/lib/container_server_test.go
index 02c3ef7f2cd..15830f76d57 100644
--- a/internal/lib/container_server_test.go
+++ b/internal/lib/container_server_test.go
@@ -147,9 +147,10 @@ var _ = t.Describe("ContainerServer", func() {
 			mockDirs(testManifest)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).To(BeNil())
 		})
 
@@ -163,9 +164,10 @@ var _ = t.Describe("ContainerServer", func() {
 			mockDirs(manifest)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).To(BeNil())
 		})
 
@@ -179,9 +181,10 @@ var _ = t.Describe("ContainerServer", func() {
 			mockDirs(manifest)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).To(BeNil())
 		})
 
@@ -190,9 +193,10 @@ var _ = t.Describe("ContainerServer", func() {
 			mockDirs(testManifest)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "")
+			sb, err := sut.LoadSandbox(context.Background(), "")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -205,9 +209,10 @@ var _ = t.Describe("ContainerServer", func() {
 			mockDirs(manifest)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -220,9 +225,10 @@ var _ = t.Describe("ContainerServer", func() {
 			mockDirs(manifest)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -239,9 +245,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -256,9 +263,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -275,9 +283,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).To(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -294,9 +303,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).To(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -313,9 +323,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).To(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -332,9 +343,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).To(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -351,9 +363,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).To(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -370,9 +383,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).NotTo(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 
@@ -385,9 +399,10 @@ var _ = t.Describe("ContainerServer", func() {
 			)
 
 			// When
-			err := sut.LoadSandbox(context.Background(), "id")
+			sb, err := sut.LoadSandbox(context.Background(), "id")
 
 			// Then
+			Expect(sb).To(BeNil())
 			Expect(err).NotTo(BeNil())
 		})
 	})
diff --git a/server/server.go b/server/server.go
index b43313339b1..c9b0c596bd4 100644
--- a/server/server.go
+++ b/server/server.go
@@ -18,6 +18,7 @@ import (
 	imageTypes "github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/idtools"
+	storageTypes "github.com/containers/storage/types"
 	"github.com/cri-o/cri-o/internal/hostport"
 	"github.com/cri-o/cri-o/internal/lib"
 	"github.com/cri-o/cri-o/internal/lib/sandbox"
@@ -160,7 +161,12 @@ func (s *Server) getPortForward(req *types.PortForwardRequest) (*types.PortForwa
 	return s.stream.streamServer.GetPortForward(req)
 }
 
-func (s *Server) restore(ctx context.Context) {
+// restore attempts to restore the sandboxes and containers.
+// For every sandbox it fails to restore, it starts a cleanup routine attempting to call CNI DEL.
+// For every container it fails to restore, it returns that container's image, so that
+// it can be cleaned up (if we're using internal_wipe).
+func (s *Server) restore(ctx context.Context) []string {
+	containersAndTheirImages := map[string]string{}
 	containers, err := s.Store().Containers()
 	if err != nil && !errors.Is(err, os.ErrNotExist) {
 		log.Warnf(ctx, "could not read containers and sandboxes: %v", err)
@@ -168,7 +174,7 @@ func (s *Server) restore(ctx context.Context) {
 	pods := map[string]*storage.RuntimeContainerMetadata{}
 	podContainers := map[string]*storage.RuntimeContainerMetadata{}
 	names := map[string][]string{}
-	deletedPods := map[string]bool{}
+	deletedPods := map[string]*sandbox.Sandbox{}
 	for i := range containers {
 		metadata, err2 := s.StorageRuntimeServer().GetContainerMetadata(containers[i].ID)
 		if err2 != nil {
@@ -184,18 +190,20 @@ func (s *Server) restore(ctx context.Context) {
 			pods[containers[i].ID] = &metadata
 		} else {
 			podContainers[containers[i].ID] = &metadata
+			containersAndTheirImages[containers[i].ID] = containers[i].ImageID
 		}
 	}
 
 	// Go through all the pods and check if it can be restored. If an error occurs, delete the pod and any containers
 	// associated with it. Release the pod and container names as well.
 	for sbID, metadata := range pods {
-		if err = s.LoadSandbox(ctx, sbID); err == nil {
+		sb, err := s.LoadSandbox(ctx, sbID)
+		if err == nil {
 			continue
 		}
 		log.Warnf(ctx, "could not restore sandbox %s container %s: %v", metadata.PodID, sbID, err)
 		for _, n := range names[sbID] {
-			if err := s.Store().DeleteContainer(n); err != nil {
+			if err := s.Store().DeleteContainer(n); err != nil && err != storageTypes.ErrNotAContainer {
 				log.Warnf(ctx, "unable to delete container %s: %v", n, err)
 			}
 			// Release the infra container name and the pod name for future use
@@ -210,7 +218,7 @@ func (s *Server) restore(ctx context.Context) {
 		for k, v := range podContainers {
 			if v.PodID == sbID {
 				for _, n := range names[k] {
-					if err := s.Store().DeleteContainer(n); err != nil {
+					if err := s.Store().DeleteContainer(n); err != nil && err != storageTypes.ErrNotAContainer {
 						log.Warnf(ctx, "unable to delete container %s: %v", n, err)
 					}
 					// Release the container name for future use
@@ -218,46 +226,74 @@ func (s *Server) restore(ctx context.Context) {
 				}
 			}
 		}
-		// Add the pod id to the list of deletedPods so we don't try to restore IPs for it later on
-		deletedPods[sbID] = true
+		// Add the pod id to the list of deletedPods, to be able to call CNI DEL on the sandbox network.
+		// Unfortunately, if we weren't able to restore a sandbox, then there's little that can be done.
+		if sb != nil {
+			deletedPods[sbID] = sb
+		}
 	}
 
 	// Go through all the containers and check if it can be restored. If an error occurs, delete the conainer and
 	// release the name associated with you.
 	for containerID := range podContainers {
-		if err := s.LoadContainer(ctx, containerID); err != nil {
-			// containers of other runtimes should not be deleted
-			if err == lib.ErrIsNonCrioContainer {
-				log.Infof(ctx, "ignoring non CRI-O container %s", containerID)
-			} else {
-				log.Warnf(ctx, "could not restore container %s: %v", containerID, err)
-				for _, n := range names[containerID] {
-					if err := s.Store().DeleteContainer(n); err != nil {
-						log.Warnf(ctx, "unable to delete container %s: %v", n, err)
-					}
-					// Release the container name
-					s.ReleaseContainerName(n)
-				}
+		err := s.LoadContainer(ctx, containerID)
+		if err == nil || err == lib.ErrIsNonCrioContainer {
+			delete(containersAndTheirImages, containerID)
+			continue
+		}
+		log.Warnf(ctx, "Could not restore container %s: %v", containerID, err)
+		for _, n := range names[containerID] {
+			if err := s.Store().DeleteContainer(n); err != nil && err != storageTypes.ErrNotAContainer {
+				log.Warnf(ctx, "Unable to delete container %s: %v", n, err)
 			}
+			// Release the container name
+			s.ReleaseContainerName(n)
 		}
 	}
 
-	// Restore sandbox IPs
-	for _, sb := range s.ListSandboxes() {
-		// Clean up networking if pod couldn't be restored and was deleted
-		if ok := deletedPods[sb.ID()]; ok {
-			if err := s.networkStop(ctx, sb); err != nil {
-				log.Warnf(ctx, "error stopping network on restore cleanup %v:", err)
+	// Cleanup the deletedPods in the networking plugin
+	wipeResourceCleaner := resourcestore.NewResourceCleaner()
+	for _, sb := range deletedPods {
+		sb := sb
+		cleanupFunc := func() error {
+			err := s.networkStop(context.Background(), sb)
+			if err == nil {
+				log.Infof(ctx, "Successfully cleaned up network for pod %s", sb.ID())
 			}
-			continue
+			return err
 		}
+		// Clean up networking if pod couldn't be restored and was deleted
+		if err := cleanupFunc(); err != nil {
+			log.Warnf(ctx, "Error stopping network on restore cleanup (will retry): %v", err)
+			wipeResourceCleaner.Add(ctx, "cleanup sandbox network", cleanupFunc)
+		}
+	}
+
+	// If any failed to be deleted, the networking plugin is likely not ready.
+	// The cleanup should be retried until it succeeds.
+	go func() {
+		if err := wipeResourceCleaner.Cleanup(); err != nil {
+			log.Errorf(ctx, "Cleanup during server startup failed: %v", err)
+		}
+	}()
+
+	// Restore sandbox IPs
 	for _, sb := range s.ListSandboxes() {
 		ips, err := s.getSandboxIPs(sb)
 		if err != nil {
-			log.Warnf(ctx, "could not restore sandbox IP for %v: %v", sb.ID(), err)
+			log.Warnf(ctx, "Could not restore sandbox IP for %v: %v", sb.ID(), err)
 			continue
 		}
 		sb.AddIPs(ips)
 	}
+
+	// Return a slice of images to remove, if internal_wipe is set.
+	imagesOfDeletedContainers := []string{}
+	for _, image := range containersAndTheirImages {
+		imagesOfDeletedContainers = append(imagesOfDeletedContainers, image)
+	}
+
+	return imagesOfDeletedContainers
 }
 
 // cleanupSandboxesOnShutdown Remove all running Sandboxes on system shutdown
@@ -419,10 +455,9 @@ func New(
 		return nil, errors.Wrap(err, "close stdin")
 	}
 
-	s.restore(ctx)
+	deletedImages := s.restore(ctx)
 	s.cleanupSandboxesOnShutdown(ctx)
-
-	s.wipeIfAppropriate(ctx)
+	s.wipeIfAppropriate(ctx, deletedImages)
 
 	var bindAddressStr string
 	bindAddress := net.ParseIP(config.StreamAddress)
@@ -492,71 +527,25 @@ func New(
 	return s, nil
 }
 
-func (s *Server) wipeIfAppropriate(ctx context.Context) {
+// wipeIfAppropriate takes a list of images. If the config's VersionFilePersist
+// indicates an upgrade has happened, it attempts to wipe that list of images.
+// This attempt is best-effort.
+func (s *Server) wipeIfAppropriate(ctx context.Context, imagesToDelete []string) {
 	if !s.config.InternalWipe {
 		return
 	}
 
-	// First, check if the node was rebooted.
-	// We know this happened because the VersionFile (which lives in a tmpfs)
-	// will not be there.
-	shouldWipeContainers, err := version.ShouldCrioWipe(s.config.VersionFile)
-	if err != nil {
-		log.Warnf(ctx, "error encountered when checking whether cri-o should wipe containers: %v", err)
-	}
-
-	// there are two locations we check before wiping:
-	// one in a temporary directory. This is to check whether the node has rebooted.
-	// if so, we should remove containers
-	// another is needed in a persistent directory. This is to check whether we've upgraded
-	// if we've upgraded, we should wipe images
+	// Check if our persistent version file is out of date.
+	// If so, we have upgraded, and we should wipe images.
 	shouldWipeImages, err := version.ShouldCrioWipe(s.config.VersionFilePersist)
 	if err != nil {
 		log.Warnf(ctx, "error encountered when checking whether cri-o should wipe images: %v", err)
 	}
-	shouldWipeContainers = shouldWipeContainers || shouldWipeImages
-
-	// First, save the images we should be wiping
-	// We won't remember if we wipe all the containers first
-	var imagesToWipe []string
-	if shouldWipeImages {
-		containers, err := s.ContainerServer.ListContainers()
-		if err != nil {
-			log.Warnf(ctx, "Failed to list containers: %v", err)
-		}
-		for _, c := range containers {
-			imagesToWipe = append(imagesToWipe, c.ImageRef())
-		}
-	}
-
-	wipeResourceCleaner := resourcestore.NewResourceCleaner()
-	if shouldWipeContainers {
-		for _, sb := range s.ContainerServer.ListSandboxes() {
-			sb := sb
-			cleanupFunc := func() error {
-				if err := s.stopPodSandbox(ctx, sb); err != nil {
-					return err
-				}
-				return s.removePodSandbox(ctx, sb)
-			}
-			if err := cleanupFunc(); err != nil {
-				log.Warnf(ctx, "Failed to cleanup pod %s (will retry): %v", sb.ID(), err)
-				wipeResourceCleaner.Add(ctx, "stop and remove pod sandbox", cleanupFunc)
-			}
-		}
-	}
-
-	go func() {
-		if err := wipeResourceCleaner.Cleanup(); err != nil {
-			log.Errorf(ctx, "Cleanup during server startup failed: %v", err)
-		}
-	}()
-
 	// Note: some of these will fail if some aspect of the pod cleanup failed as well,
 	// but this is best-effort anyway, as the Kubelet will eventually cleanup images when
 	// disk usage gets too high.
 	if shouldWipeImages {
-		for _, img := range imagesToWipe {
+		for _, img := range imagesToDelete {
 			if err := s.removeImage(ctx, img); err != nil {
 				log.Warnf(ctx, "failed to remove image %s: %v", img, err)
 			}
diff --git a/test/crio-wipe.bats b/test/crio-wipe.bats
index 6a317ca9370..a508b4a7930 100644
--- a/test/crio-wipe.bats
+++ b/test/crio-wipe.bats
@@ -11,6 +11,8 @@ function setup() {
 	export CONTAINER_VERSION_FILE="$TESTDIR"/version.tmp
 	export CONTAINER_VERSION_FILE_PERSIST="$TESTDIR"/version-persist.tmp
 	export CONTAINER_CLEAN_SHUTDOWN_FILE="$TESTDIR"/clean-shutdown.tmp
+	CONTAINER_NAMESPACES_DIR=$(mktemp -d)
+	export CONTAINER_NAMESPACES_DIR
 }
 
 function run_podman_with_args() {
@@ -23,6 +25,7 @@ function teardown() {
 	cleanup_test
 	run_podman_with_args stop -a
 	run_podman_with_args rm -fa
+	cleanup_namespaces_dir
 }
 
 # run crio_wipe calls crio_wipe and tests it succeeded
@@ -57,6 +60,12 @@ function test_crio_did_not_wipe_images() {
 	[[ "$output" == *"$IMAGE_USED"* ]]
 }
 
+# simulate a reboot by unmounting and removing the namespaces
+function cleanup_namespaces_dir() {
+	find "$CONTAINER_NAMESPACES_DIR" -type f -exec umount {} \;
+	rm -fr "$CONTAINER_NAMESPACES_DIR"
+}
+
 function start_crio_with_stopped_pod() {
 	start_crio
@@ -205,11 +214,14 @@ function start_crio_with_stopped_pod() {
 }
 
 @test "internal_wipe remove containers and images when remove both" {
+	# simulate a reboot by having a removable namespaces dir
 	start_crio_with_stopped_pod
 	stop_crio_no_clean
 
 	rm "$CONTAINER_VERSION_FILE"
 	rm "$CONTAINER_VERSION_FILE_PERSIST"
+	# simulate a reboot by having a removable namespaces dir
+	cleanup_namespaces_dir
 
 	CONTAINER_INTERNAL_WIPE=true start_crio_no_setup
 	test_crio_wiped_containers
@@ -221,6 +233,8 @@ function start_crio_with_stopped_pod() {
 	stop_crio_no_clean
 
 	rm "$CONTAINER_VERSION_FILE"
+	# simulate a reboot by having a removable namespaces dir
+	cleanup_namespaces_dir
 
 	CONTAINER_INTERNAL_WIPE=true start_crio_no_setup
 	test_crio_wiped_containers
@@ -232,6 +246,8 @@ function start_crio_with_stopped_pod() {
 	stop_crio_no_clean
 
 	rm "$CONTAINER_VERSION_FILE_PERSIST"
+	# simulate a reboot by having a removable namespaces dir
+	cleanup_namespaces_dir
 
 	CONTAINER_INTERNAL_WIPE=true start_crio_no_setup
 	test_crio_wiped_containers
@@ -276,6 +292,8 @@ function start_crio_with_stopped_pod() {
 	runtime kill "$ctr_id" || true
 	runtime kill "$pod_id" || true
 
+	# simulate a reboot by having a removable namespaces dir
+	cleanup_namespaces_dir
 
 	# pretend like the CNI plugin is waiting for a container to start
 	mv "$CRIO_CNI_PLUGIN"/"$CNI_TYPE" "$CRIO_CNI_PLUGIN"/"$CNI_TYPE"-hidden