diff --git a/cmd/crio/main.go b/cmd/crio/main.go index 4ece4e19cf2..df7581f6def 100644 --- a/cmd/crio/main.go +++ b/cmd/crio/main.go @@ -241,6 +241,28 @@ func main() { logrus.Fatal(err) } + if config.CleanShutdownFile != "" { + // clear out the shutdown file + if err := os.Remove(config.CleanShutdownFile); err != nil && !os.IsNotExist(err) { + logrus.Error(err) + } + + // Write "$CleanShutdownFile".supported to show crio-wipe that + // we should be wiping if the CleanShutdownFile wasn't found. + // This protects us from wiping after an upgrade from a version that doesn't support + // CleanShutdownFile. + f, err := os.Create(config.CleanShutdownSupportedFileName()) + if err != nil { + logrus.Errorf("Writing clean shutdown supported file: %v", err) + } + f.Close() + + // and sync the changes to disk + if err := utils.SyncParent(config.CleanShutdownFile); err != nil { + logrus.Errorf("failed to sync parent directory of clean shutdown file: %v", err) + } + } + runtime.RegisterRuntimeServiceServer(grpcServer, service) runtime.RegisterImageServiceServer(grpcServer, service) diff --git a/cmd/crio/wipe.go b/cmd/crio/wipe.go index 44a7569f5aa..4875f6356d1 100644 --- a/cmd/crio/wipe.go +++ b/cmd/crio/wipe.go @@ -7,6 +7,7 @@ import ( "github.com/cri-o/cri-o/internal/criocli" "github.com/cri-o/cri-o/internal/storage" "github.com/cri-o/cri-o/internal/version" + crioconf "github.com/cri-o/cri-o/pkg/config" json "github.com/json-iterator/go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -32,17 +33,25 @@ func crioWipe(c *cli.Context) error { return err } + store, err := config.GetStore() + if err != nil { + return err + } shouldWipeImages := true shouldWipeContainers := true - // First, check if we need to upgrade at all + if !c.IsSet("force") { - // there are two locations we check before wiping: - // one in a temporary directory. This is to check whether the node has rebooted. - // if so, we should remove containers 
+ // We know this happened because the VersionFile (which lives in a tmpfs) + // will not be there. shouldWipeContainers, err = version.ShouldCrioWipe(config.VersionFile) if err != nil { - logrus.Infof("%v: triggering wipe of containers", err.Error()) + logrus.Infof("checking whether cri-o should wipe containers: %v", err) } + + // there are two locations we check before wiping: + // one in a temporary directory. This is to check whether the node has rebooted. + // if so, we should remove containers // another is needed in a persistent directory. This is to check whether we've upgraded // if we've upgraded, we should wipe images shouldWipeImages, err = version.ShouldCrioWipe(config.VersionFilePersist) @@ -51,6 +60,13 @@ func crioWipe(c *cli.Context) error { } } + // Then, check whether crio has shutdown with time to sync. + // Note: this is only needed if the node rebooted. + // If there wasn't time to sync, we should clear the storage directory + if shouldWipeContainers && shutdownWasUnclean(config) { + return handleCleanShutdown(config, store) + } + // If crio is configured to wipe internally (and `--force` wasn't set) // the `crio wipe` command has nothing left to do, // as the remaining work will be done on server startup. 
@@ -73,11 +89,6 @@ func crioWipe(c *cli.Context) error { return nil } - store, err := config.GetStore() - if err != nil { - return err - } - cstore := ContainerStore{store} if err := cstore.wipeCrio(shouldWipeImages); err != nil { return err @@ -86,6 +97,40 @@ func crioWipe(c *cli.Context) error { return nil } +func shutdownWasUnclean(config *crioconf.Config) bool { + // CleanShutdownFile not configured, skip + if config.CleanShutdownFile == "" { + return false + } + // CleanShutdownFile isn't supported, skip + if _, err := os.Stat(config.CleanShutdownSupportedFileName()); err != nil { + return false + } + // CleanShutdownFile is present, indicating clean shutdown + if _, err := os.Stat(config.CleanShutdownFile); err == nil { + return false + } + return true +} + +func handleCleanShutdown(config *crioconf.Config, store cstorage.Store) error { + logrus.Infof("file %s not found. Wiping storage directory %s because of suspected dirty shutdown", config.CleanShutdownFile, store.GraphRoot()) + // If we do not do this, we may leak other resources that are not directly in the graphroot. 
+ // Erroring here should not be fatal though, it's a best effort cleanup + if err := store.Wipe(); err != nil { + logrus.Infof("failed to wipe storage cleanly: %v", err) + } + // unmount storage or else we will fail with EBUSY + if _, err := store.Shutdown(false); err != nil { + return errors.Errorf("failed to shutdown storage before wiping: %v", err) + } + // totally remove storage, whatever is left (possibly orphaned layers) + if err := os.RemoveAll(store.GraphRoot()); err != nil { + return errors.Errorf("failed to remove storage directory: %v", err) + } + return nil +} + type ContainerStore struct { store cstorage.Store } @@ -96,14 +141,14 @@ func (c ContainerStore) wipeCrio(shouldWipeImages bool) error { return err } if len(crioContainers) != 0 { - logrus.Infof("wiping containers") + logrus.Infof("Wiping containers") } for _, id := range crioContainers { c.deleteContainer(id) } if shouldWipeImages { if len(crioImages) != 0 { - logrus.Infof("wiping images") + logrus.Infof("Wiping images") } for _, id := range crioImages { c.deleteImage(id) diff --git a/completions/bash/crio b/completions/bash/crio index 3e53048e531..b09ec045436 100755 --- a/completions/bash/crio +++ b/completions/bash/crio @@ -17,6 +17,7 @@ h --big-files-temporary-dir --bind-mount-prefix --cgroup-manager +--clean-shutdown-file --cni-config-dir --cni-default-network --cni-plugin-dir diff --git a/completions/fish/crio.fish b/completions/fish/crio.fish index e56a20dacf5..b6d346521e3 100644 --- a/completions/fish/crio.fish +++ b/completions/fish/crio.fish @@ -14,6 +14,7 @@ complete -c crio -n '__fish_crio_no_subcommand' -f -l apparmor-profile -r -d 'Na complete -c crio -n '__fish_crio_no_subcommand' -f -l big-files-temporary-dir -r -d 'Path to the temporary directory to use for storing big files, used to store image blobs and data streams related to containers image management.' 
complete -c crio -n '__fish_crio_no_subcommand' -f -l bind-mount-prefix -r -d 'A prefix to use for the source of the bind mounts. This option would be useful if you were running CRI-O in a container. And had `/` mounted on `/host` in your container. Then if you ran CRI-O with the `--bind-mount-prefix=/host` option, CRI-O would add /host to any bind mounts it is handed over CRI. If Kubernetes asked to have `/var/lib/foobar` bind mounted into the container, then CRI-O would bind mount `/host/var/lib/foobar`. Since CRI-O itself is running in a container with `/` or the host mounted on `/host`, the container would end up with `/var/lib/foobar` from the host mounted in the container rather then `/var/lib/foobar` from the CRI-O container. (default: "")' complete -c crio -n '__fish_crio_no_subcommand' -f -l cgroup-manager -r -d 'cgroup manager (cgroupfs or systemd)' +complete -c crio -n '__fish_crio_no_subcommand' -l clean-shutdown-file -r -d 'Location for CRI-O to lay down the clean shutdown file. It indicates whether we\'ve had time to sync changes to disk before shutting down. If not found, crio wipe will clear the storage directory' complete -c crio -n '__fish_crio_no_subcommand' -l cni-config-dir -r -d 'CNI configuration files directory' complete -c crio -n '__fish_crio_no_subcommand' -f -l cni-default-network -r -d 'Name of the default CNI network to select. If not set or "", then CRI-O will pick-up the first one found in --cni-config-dir.' complete -c crio -n '__fish_crio_no_subcommand' -f -l cni-plugin-dir -r -d 'CNI plugin binaries directory' diff --git a/completions/zsh/_crio b/completions/zsh/_crio index 991c0b6b43c..f9f05e1267f 100644 --- a/completions/zsh/_crio +++ b/completions/zsh/_crio @@ -7,7 +7,7 @@ it later with **--config**. Global options will modify the output.' 
'version:dis _describe 'commands' cmds local -a opts - opts=('--additional-devices' '--apparmor-profile' '--big-files-temporary-dir' '--bind-mount-prefix' '--cgroup-manager' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' '--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--drop-infra-ctr' '--enable-metrics' '--enable-profile-unix-socket' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--infra-ctr-cpuset' '--insecure-registry' '--internal-wipe' '--irqbalance-config-file' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-ns-lifecycle' '--metrics-port' '--metrics-socket' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtimes' '--seccomp-profile' '--seccomp-use-default-when-empty' '--selinux' '--separate-pull-cgroup' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-idle-timeout' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version') + opts=('--additional-devices' '--apparmor-profile' '--big-files-temporary-dir' '--bind-mount-prefix' '--cgroup-manager' '--clean-shutdown-file' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' 
'--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--drop-infra-ctr' '--enable-metrics' '--enable-profile-unix-socket' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--infra-ctr-cpuset' '--insecure-registry' '--internal-wipe' '--irqbalance-config-file' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-ns-lifecycle' '--metrics-port' '--metrics-socket' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtimes' '--seccomp-profile' '--seccomp-use-default-when-empty' '--selinux' '--separate-pull-cgroup' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-idle-timeout' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version') _describe 'global options' opts return diff --git a/docs/crio.8.md b/docs/crio.8.md index b31c670fe66..32831465903 100644 --- a/docs/crio.8.md +++ b/docs/crio.8.md @@ -16,6 +16,7 @@ crio [--big-files-temporary-dir]=[value] [--bind-mount-prefix]=[value] [--cgroup-manager]=[value] +[--clean-shutdown-file]=[value] [--cni-config-dir]=[value] [--cni-default-network]=[value] [--cni-plugin-dir]=[value] @@ -128,6 +129,8 @@ crio [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] **--cgroup-manager**="": cgroup manager (cgroupfs or systemd) (default: systemd) +**--clean-shutdown-file**="": Location for CRI-O to lay down the clean shutdown file. It indicates whether we've had time to sync changes to disk before shutting down. 
If not found, crio wipe will clear the storage directory (default: /var/lib/crio/clean.shutdown) + **--cni-config-dir**="": CNI configuration files directory (default: /etc/cni/net.d/) **--cni-default-network**="": Name of the default CNI network to select. If not set or "", then CRI-O will pick-up the first one found in --cni-config-dir. diff --git a/docs/crio.conf.5.md b/docs/crio.conf.5.md index d85d9a1d5eb..30836e0c0e3 100644 --- a/docs/crio.conf.5.md +++ b/docs/crio.conf.5.md @@ -54,6 +54,11 @@ CRI-O reads its storage defaults from the containers-storage.conf(5) file locate It is used to check if crio wipe should wipe images, which should only happen when CRI-O has been upgraded +**clean_shutdown_file**="/var/lib/crio/clean.shutdown" + Location for CRI-O to lay down the clean shutdown file. + It is used to check whether crio had time to sync before shutting down. + If not found, crio wipe will clear the storage directory. + **internal_wipe**=false Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations. 
diff --git a/internal/criocli/criocli.go b/internal/criocli/criocli.go index 3bc04eea53a..45a6ecd31fd 100644 --- a/internal/criocli/criocli.go +++ b/internal/criocli/criocli.go @@ -284,6 +284,9 @@ func mergeConfig(config *libconfig.Config, ctx *cli.Context) error { if ctx.IsSet("version-file-persist") { config.VersionFilePersist = ctx.String("version-file-persist") } + if ctx.IsSet("clean-shutdown-file") { + config.CleanShutdownFile = ctx.String("clean-shutdown-file") + } if ctx.IsSet("internal-wipe") { config.InternalWipe = ctx.Bool("internal-wipe") } @@ -841,6 +844,13 @@ func getCrioFlags(defConf *libconfig.Config) []cli.Flag { Usage: "CPU set to run infra containers, if not specified CRI-O will use all online CPUs to run infra containers (default: '').", EnvVars: []string{"CONTAINER_INFRA_CTR_CPUSET"}, }, + &cli.StringFlag{ + Name: "clean-shutdown-file", + Usage: "Location for CRI-O to lay down the clean shutdown file. It indicates whether we've had time to sync changes to disk before shutting down. If not found, crio wipe will clear the storage directory", + Value: defConf.CleanShutdownFile, + EnvVars: []string{"CONTAINER_CLEAN_SHUTDOWN_FILE"}, + TakesFile: true, + }, } } diff --git a/pkg/config/config.go b/pkg/config/config.go index 920c184fd72..7e8bfd7b053 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -148,6 +148,10 @@ type RootConfig struct { // that checks whether we've upgraded VersionFilePersist string `toml:"version_file_persist"` + // CleanShutdownFile is the location CRI-O will lay down the clean shutdown file + // that checks whether we've had time to sync before shutting down + CleanShutdownFile string `toml:"clean_shutdown_file"` + // InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. // If set to false, one must use the external command `crio wipe` to wipe the containers and images in these situations. 
InternalWipe bool `toml:"internal_wipe"` @@ -586,6 +590,7 @@ func DefaultConfig() (*Config, error) { LogDir: "/var/log/crio/pods", VersionFile: CrioVersionPathTmp, VersionFilePersist: CrioVersionPathPersist, + CleanShutdownFile: CrioCleanShutdownFile, }, APIConfig: APIConfig{ Listen: CrioSocketPath, @@ -754,6 +759,10 @@ func (c *RootConfig) Validate(onExecution bool) error { return nil } +func (c *RootConfig) CleanShutdownSupportedFileName() string { + return c.CleanShutdownFile + ".supported" +} + // Validate is the main entry point for runtime configuration validation // The parameter `onExecution` specifies if the validation should include // execution checks. It returns an `error` on validation failure, otherwise diff --git a/pkg/config/config_unix.go b/pkg/config/config_unix.go index bfe613749d6..a90c716e3c2 100644 --- a/pkg/config/config_unix.go +++ b/pkg/config/config_unix.go @@ -25,4 +25,9 @@ const ( // CrioVersionPathPersist is where the CRI-O version file is located // used to check whether we've upgraded, and thus need to remove images CrioVersionPathPersist = "/var/lib/crio/version" + + // CrioCleanShutdownFile is the location CRI-O will lay down the clean shutdown file + // that checks whether we've had time to sync before shutting down. + // If not, crio wipe will clear the storage directory. + CrioCleanShutdownFile = "/var/lib/crio/clean.shutdown" ) diff --git a/pkg/config/template.go b/pkg/config/template.go index c100e997612..ae5a67c07de 100644 --- a/pkg/config/template.go +++ b/pkg/config/template.go @@ -62,6 +62,11 @@ version_file = "{{ .VersionFile }}" # only happen when CRI-O has been upgraded version_file_persist = "{{ .VersionFilePersist }}" +# Location for CRI-O to lay down the clean shutdown file. +# It is used to check whether crio had time to sync before shutting down. +# If not found, crio wipe will clear the storage directory. 
+clean_shutdown_file = "{{ .CleanShutdownFile }}" + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. internal_wipe = {{ .InternalWipe }} diff --git a/server/server.go b/server/server.go index 394a6694d0d..74125bf1471 100644 --- a/server/server.go +++ b/server/server.go @@ -29,6 +29,7 @@ import ( "github.com/cri-o/cri-o/internal/version" libconfig "github.com/cri-o/cri-o/pkg/config" "github.com/cri-o/cri-o/server/metrics" + "github.com/cri-o/cri-o/utils" "github.com/fsnotify/fsnotify" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -312,7 +313,38 @@ func (s *Server) Shutdown(ctx context.Context) error { // crio.service restart!!! s.cleanupSandboxesOnShutdown(ctx) - return s.ContainerServer.Shutdown() + if err := s.ContainerServer.Shutdown(); err != nil { + return err + } + + // first, make sure we sync all storage changes + if err := utils.Sync(s.Store().GraphRoot()); err != nil { + return errors.Wrapf(err, "failed to sync graph root after shutting down") + } + + if s.config.CleanShutdownFile != "" { + // then, we write the CleanShutdownFile + // we do this after the sync, to ensure ordering. + // Otherwise, we may run into situations where the CleanShutdownFile + // is written before storage, causing us to think a corrupted storage + // is not so. + f, err := os.Create(s.config.CleanShutdownFile) + if err != nil { + return errors.Wrapf(err, "failed to write file to indicate a clean shutdown") + } + f.Close() + + // finally, attempt to sync the newly created file to disk. + // It's still possible we crash after Create but before this Sync, + // which will lead us to think storage wasn't synced. 
+ // However, that's much less likely than if we don't have a second Sync, + // and less risky than if we don't Sync after the Create + if err := utils.SyncParent(s.config.CleanShutdownFile); err != nil { + return errors.Wrapf(err, "failed to sync clean shutdown file") + } + } + + return nil } // configureMaxThreads sets the Go runtime max threads threshold diff --git a/server/server_test.go b/server/server_test.go index 81a867bb25c..996112423f3 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -2,6 +2,7 @@ package server_test import ( "context" + "os" cstorage "github.com/containers/storage" "github.com/cri-o/cri-o/server" @@ -267,6 +268,7 @@ var _ = t.Describe("Server", func() { // Given gomock.InOrder( storeMock.EXPECT().Shutdown(gomock.Any()).Return(nil, nil), + storeMock.EXPECT().GraphRoot().Return(emptyDir), ) // When @@ -274,6 +276,10 @@ var _ = t.Describe("Server", func() { // Then Expect(err).To(BeNil()) + + // expect cri-o to have created the clean shutdown file + _, err = os.Stat(sut.Config().CleanShutdownFile) + Expect(err).To(BeNil()) }) }) diff --git a/server/suite_test.go b/server/suite_test.go index d37e5238cc9..52b965d0ad9 100644 --- a/server/suite_test.go +++ b/server/suite_test.go @@ -142,6 +142,7 @@ var beforeEach = func() { serverConfig.ContainerAttachSocketDir = testPath serverConfig.ContainerExitsDir = path.Join(testPath, "exits") serverConfig.LogDir = path.Join(testPath, "log") + serverConfig.CleanShutdownFile = path.Join(testPath, "clean.shutdown") // We want a directory that is guaranteed to exist, but it must // be empty so we don't erroneously load anything and make tests diff --git a/test/crio-wipe.bats b/test/crio-wipe.bats index fe35eeceda0..daf387480f8 100644 --- a/test/crio-wipe.bats +++ b/test/crio-wipe.bats @@ -12,6 +12,7 @@ function setup() { export CONTAINER_VERSION_FILE_PERSIST="$TESTDIR"/version-persist.tmp CONTAINER_NAMESPACES_DIR=$(mktemp -d) export CONTAINER_NAMESPACES_DIR + export 
CONTAINER_CLEAN_SHUTDOWN_FILE="$TESTDIR"/clean-shutdown.tmp } function run_podman_with_args() { @@ -130,6 +131,76 @@ function start_crio_with_stopped_pod() { run_podman_with_args ps -a | grep test } +@test "do clear everything when shutdown file not found" { + start_crio_with_stopped_pod + stop_crio_no_clean + + rm "$CONTAINER_CLEAN_SHUTDOWN_FILE" + rm "$CONTAINER_VERSION_FILE" + + run_crio_wipe + + start_crio_no_setup + + test_crio_wiped_containers + test_crio_wiped_images +} + +@test "do clear podman containers when shutdown file not found" { + if [[ -z "$PODMAN_BINARY" ]]; then + skip "Podman not installed" + fi + + start_crio_with_stopped_pod + stop_crio_no_clean + + run_podman_with_args run --name test quay.io/crio/busybox:latest ls + # all podman containers would be stopped after a reboot + run_podman_with_args stop -a + + rm "$CONTAINER_CLEAN_SHUTDOWN_FILE" + rm "$CONTAINER_VERSION_FILE" + + run_crio_wipe + + run_podman_with_args ps -a + [[ ! "$output" =~ "test" ]] +} + +@test "fail to clear podman containers when shutdown file not found but container still running" { + if [[ -z "$PODMAN_BINARY" ]]; then + skip "Podman not installed" + fi + + start_crio_with_stopped_pod + stop_crio_no_clean + + # all podman containers would be stopped after a reboot + run_podman_with_args run --name test -d quay.io/crio/busybox:latest top + + rm "$CONTAINER_CLEAN_SHUTDOWN_FILE" + rm "$CONTAINER_VERSION_FILE" + + run "$CRIO_BINARY_PATH" --config "$CRIO_CONFIG" wipe + echo "$status" + echo "$output" + [ "$status" -ne 0 ] +} + +@test "don't clear containers if clean shutdown supported file not present" { + start_crio_with_stopped_pod + stop_crio_no_clean + + rm "$CONTAINER_CLEAN_SHUTDOWN_FILE.supported" + + run_crio_wipe + + start_crio_no_setup + + test_crio_did_not_wipe_containers + test_crio_did_not_wipe_images +} + @test "internal_wipe remove containers and images when remove both" { # simulate a reboot by having a removable namespaces dir start_crio_with_stopped_pod diff 
--git a/test/helpers.bash b/test/helpers.bash index 74f6c79e98c..53c10f47a84 100644 --- a/test/helpers.bash +++ b/test/helpers.bash @@ -379,8 +379,9 @@ function cleanup_pods() { } function stop_crio_no_clean() { + local signal="$1" if [ -n "${CRIO_PID+x}" ]; then - kill "$CRIO_PID" >/dev/null 2>&1 + kill "$signal" "$CRIO_PID" >/dev/null 2>&1 || true wait "$CRIO_PID" unset CRIO_PID fi @@ -388,7 +389,7 @@ function stop_crio_no_clean() { # Stop crio. function stop_crio() { - stop_crio_no_clean + stop_crio_no_clean "" cleanup_network_conf } diff --git a/utils/utils.go b/utils/utils.go index 0b413c26935..802eb76af6c 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -351,3 +351,22 @@ func GetLabelOptions(selinuxOptions *pb.SELinuxOption) []string { } return labels } + +// SyncParent ensures a path's parent directory is synced to disk +func SyncParent(path string) error { + return Sync(filepath.Dir(path)) +} + +// Sync ensures a path is synced to disk +func Sync(path string) error { + f, err := os.OpenFile(path, os.O_RDONLY, 0o755) + if err != nil { + return err + } + defer f.Close() + + if err := syscall.Fsync(int(f.Fd())); err != nil { + return err + } + return nil +}