13 changes: 13 additions & 0 deletions cmd/crio/main.go
@@ -247,6 +247,19 @@ func main() {
logrus.Fatal(err)
}

if config.CleanShutdownFile != "" {
// clear out the shutdown file
if err := os.Remove(config.CleanShutdownFile); err != nil {
// not a fatal error, as it could have already been cleaned up
logrus.Error(err)
}

// and sync the changes to disk
if err := utils.SyncParent(config.CleanShutdownFile); err != nil {
logrus.Errorf("failed to sync parent directory of clean shutdown file: %v", err)
}
}

runtime.RegisterRuntimeServiceServer(grpcServer, service)
runtime.RegisterImageServiceServer(grpcServer, service)

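The `utils.Sync` and `utils.SyncParent` helpers referenced in the hunk above are not part of this diff. As a rough sketch only — assuming they boil down to fsync(2) on the target and on its parent directory — they could look like the following; the real helpers may well differ (for example, by using syncfs(2) to flush the whole filesystem that holds the graph root):

```go
package utils

import (
	"os"
	"path/filepath"
)

// Sync opens path and fsyncs it, flushing pending writes (or, for a
// directory, pending entry changes) to stable storage.
func Sync(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return f.Sync()
}

// SyncParent fsyncs the parent directory of path, so that the creation
// or removal of the file itself is made durable.
func SyncParent(path string) error {
	return Sync(filepath.Dir(path))
}
```
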
33 changes: 26 additions & 7 deletions cmd/crio/wipe.go
@@ -32,6 +32,26 @@ func crioWipe(c *cli.Context) error {
return err
}

store, err := config.GetStore()
if err != nil {
return err
}

// first, check whether crio shut down with time to sync
// if not, we should clear the storage directory
if config.CleanShutdownFile != "" {
if _, err := os.Stat(config.CleanShutdownFile); err != nil {
logrus.Infof("file %s not found. Wiping storage directory %s because of suspected dirty shutdown", config.CleanShutdownFile, store.GraphRoot())
if _, err := store.Shutdown(false); err != nil {
return errors.Errorf("failed to shutdown storage before wiping: %v", err)
}
if err := os.RemoveAll(store.GraphRoot()); err != nil {
return errors.Errorf("failed to remove storage directory: %v", err)
}
return nil
}
}

shouldWipeImages := true
shouldWipeContainers := true
// First, check if we need to upgrade at all
@@ -64,11 +84,6 @@ func crioWipe(c *cli.Context) error {
return nil
}

store, err := config.GetStore()
if err != nil {
return err
}

cstore := ContainerStore{store}
if err := cstore.wipeCrio(shouldWipeImages); err != nil {
return err
@@ -86,10 +101,16 @@ func (c ContainerStore) wipeCrio(shouldWipeImages bool) error {
if err != nil {
return err
}
if len(crioContainers) != 0 {
logrus.Infof("Wiping containers")
}
for _, id := range crioContainers {
c.deleteContainer(id)
}
if shouldWipeImages {
if len(crioImages) != 0 {
logrus.Infof("Wiping images")
}
for _, id := range crioImages {
c.deleteImage(id)
}
@@ -127,7 +148,6 @@ func (c ContainerStore) getCrioContainersAndImages() (crioContainers, crioImages
}

func (c ContainerStore) deleteContainer(id string) {
logrus.Infof("wiping containers")
if mounted, err := c.store.Unmount(id, true); err != nil || mounted {
logrus.Errorf("unable to unmount container %s: %v", id, err)
return
@@ -140,7 +160,6 @@ func (c ContainerStore) deleteContainer(id string) {
}

func (c ContainerStore) deleteImage(id string) {
logrus.Infof("wiping image")
if _, err := c.store.DeleteImage(id, true); err != nil {
logrus.Errorf("unable to delete image %s: %v", id, err)
return
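Condensed, the new early exit in `crio wipe` amounts to: if a clean-shutdown file is configured but missing, shut storage down and remove the entire graph root; otherwise fall through to the existing version-file checks. A hypothetical helper capturing that flow (not code from this change; the `Store` methods used are the ones the diff itself calls):

```go
package main

import (
	"os"

	cstorage "github.com/containers/storage"
)

// wipeStorageIfDirty is a hypothetical condensation of the check added
// above. It reports whether a full storage wipe was performed.
func wipeStorageIfDirty(store cstorage.Store, cleanShutdownFile string) (bool, error) {
	if cleanShutdownFile == "" {
		// Feature disabled: fall through to the version-file checks.
		return false, nil
	}
	if _, err := os.Stat(cleanShutdownFile); err == nil {
		// A clean shutdown was recorded; nothing to wipe on this path.
		return false, nil
	}
	// Marker missing: suspected dirty shutdown. Shut storage down and
	// remove the whole graph root.
	if _, err := store.Shutdown(false); err != nil {
		return false, err
	}
	return true, os.RemoveAll(store.GraphRoot())
}
```
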
1 change: 1 addition & 0 deletions completions/bash/crio
@@ -16,6 +16,7 @@ h
--apparmor-profile
--bind-mount-prefix
--cgroup-manager
--clean-shutdown-file
--cni-config-dir
--cni-default-network
--cni-plugin-dir
1 change: 1 addition & 0 deletions completions/fish/crio.fish
@@ -13,6 +13,7 @@ complete -c crio -n '__fish_crio_no_subcommand' -f -l additional-devices -r -d '
complete -c crio -n '__fish_crio_no_subcommand' -f -l apparmor-profile -r -d 'Name of the apparmor profile to be used as the runtime\'s default. This only takes effect if the user does not specify a profile via the Kubernetes Pod\'s metadata annotation.'
complete -c crio -n '__fish_crio_no_subcommand' -f -l bind-mount-prefix -r -d 'A prefix to use for the source of the bind mounts. This option would be useful if you were running CRI-O in a container. And had `/` mounted on `/host` in your container. Then if you ran CRI-O with the `--bind-mount-prefix=/host` option, CRI-O would add /host to any bind mounts it is handed over CRI. If Kubernetes asked to have `/var/lib/foobar` bind mounted into the container, then CRI-O would bind mount `/host/var/lib/foobar`. Since CRI-O itself is running in a container with `/` or the host mounted on `/host`, the container would end up with `/var/lib/foobar` from the host mounted in the container rather then `/var/lib/foobar` from the CRI-O container. (default: "")'
complete -c crio -n '__fish_crio_no_subcommand' -f -l cgroup-manager -r -d 'cgroup manager (cgroupfs or systemd)'
complete -c crio -n '__fish_crio_no_subcommand' -l clean-shutdown-file -r -d 'Location for CRI-O to lay down the clean shutdown file. It indicates whether we\'ve had time to sync changes to disk before shutting down. If not found, crio wipe will clear the storage directory'
complete -c crio -n '__fish_crio_no_subcommand' -l cni-config-dir -r -d 'CNI configuration files directory'
complete -c crio -n '__fish_crio_no_subcommand' -f -l cni-default-network -r -d 'Name of the default CNI network to select. If not set or "", then CRI-O will pick-up the first one found in --cni-config-dir.'
complete -c crio -n '__fish_crio_no_subcommand' -f -l cni-plugin-dir -r -d 'CNI plugin binaries directory'
2 changes: 1 addition & 1 deletion completions/zsh/_crio
@@ -7,7 +7,7 @@ it later with **--config**. Global options will modify the output.' 'version:dis
_describe 'commands' cmds

local -a opts
opts=('--additional-devices' '--apparmor-profile' '--bind-mount-prefix' '--cgroup-manager' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' '--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--enable-metrics' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--insecure-registry' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-network-ns-lifecycle' '--manage-ns-lifecycle' '--metrics-port' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtime' '--runtimes' '--seccomp-profile' '--selinux' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version')
opts=('--additional-devices' '--apparmor-profile' '--bind-mount-prefix' '--cgroup-manager' '--clean-shutdown-file' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' '--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--enable-metrics' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--insecure-registry' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-network-ns-lifecycle' '--manage-ns-lifecycle' '--metrics-port' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtime' '--runtimes' '--seccomp-profile' '--selinux' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version')
_describe 'global options' opts

return
3 changes: 3 additions & 0 deletions docs/crio.8.md
@@ -15,6 +15,7 @@ crio
[--apparmor-profile]=[value]
[--bind-mount-prefix]=[value]
[--cgroup-manager]=[value]
[--clean-shutdown-file]=[value]
[--cni-config-dir]=[value]
[--cni-default-network]=[value]
[--cni-plugin-dir]=[value]
@@ -117,6 +118,8 @@ crio [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]

**--cgroup-manager**="": cgroup manager (cgroupfs or systemd) (default: systemd)

**--clean-shutdown-file**="": Location for CRI-O to lay down the clean shutdown file. It indicates whether we've had time to sync changes to disk before shutting down. If not found, crio wipe will clear the storage directory (default: /var/lib/crio/clean.shutdown)

**--cni-config-dir**="": CNI configuration files directory (default: /etc/cni/net.d/)

**--cni-default-network**="": Name of the default CNI network to select. If not set or "", then CRI-O will pick-up the first one found in --cni-config-dir.
5 changes: 5 additions & 0 deletions docs/crio.conf.5.md
@@ -54,6 +54,11 @@ CRI-O reads its storage defaults from the containers-storage.conf(5) file locate
It is used to check if crio wipe should wipe images, which should
only happen when CRI-O has been upgraded

**clean_shutdown_file**="/var/lib/crio/clean.shutdown"
Location for CRI-O to lay down the clean shutdown file.
It is used to check whether crio had time to sync before shutting down.
If not found, crio wipe will clear the storage directory.

## CRIO.API TABLE
The `crio.api` table contains settings for the kubelet/gRPC interface.

10 changes: 10 additions & 0 deletions internal/criocli/criocli.go
@@ -263,6 +263,9 @@ func mergeConfig(config *libconfig.Config, ctx *cli.Context) error {
if ctx.IsSet("version-file-persist") {
config.VersionFilePersist = ctx.String("version-file-persist")
}
if ctx.IsSet("clean-shutdown-file") {
config.CleanShutdownFile = ctx.String("clean-shutdown-file")
}
if ctx.IsSet("enable-metrics") {
config.EnableMetrics = ctx.Bool("enable-metrics")
}
@@ -765,6 +768,13 @@ func getCrioFlags(defConf *libconfig.Config) []cli.Flag {
EnvVars: []string{"CONTAINER_VERSION_FILE_PERSIST"},
TakesFile: true,
},
&cli.StringFlag{
Name: "clean-shutdown-file",
Usage: "Location for CRI-O to lay down the clean shutdown file. It indicates whether we've had time to sync changes to disk before shutting down. If not found, crio wipe will clear the storage directory",
Value: defConf.CleanShutdownFile,
EnvVars: []string{"CONTAINER_CLEAN_SHUTDOWN_FILE"},
TakesFile: true,
},
}
}

5 changes: 5 additions & 0 deletions pkg/config/config.go
@@ -136,6 +136,10 @@ type RootConfig struct {
// VersionFilePersist is the location CRI-O will lay down the version file
// that checks whether we've upgraded
VersionFilePersist string `toml:"version_file_persist"`

// CleanShutdownFile is the location CRI-O will lay down the clean shutdown file,
// which indicates whether we had time to sync changes to disk before shutting down
CleanShutdownFile string `toml:"clean_shutdown_file"`
}

// RuntimeHandler represents each item of the "crio.runtime.runtimes" TOML
@@ -527,6 +531,7 @@ func DefaultConfig() (*Config, error) {
LogDir: "/var/log/crio/pods",
VersionFile: CrioVersionPathTmp,
VersionFilePersist: CrioVersionPathPersist,
CleanShutdownFile: CrioCleanShutdownFile,
},
APIConfig: APIConfig{
Listen: CrioSocketPath,
5 changes: 5 additions & 0 deletions pkg/config/config_unix.go
@@ -25,4 +25,9 @@ const (
// CrioVersionPathPersist is where the CRI-O version file is located
// used to check whether we've upgraded, and thus need to remove images
CrioVersionPathPersist = "/var/lib/crio/version"

// CrioCleanShutdownFile is the location CRI-O will lay down the clean shutdown file,
// which indicates whether we had time to sync before shutting down.
// If not, crio wipe will clear the storage directory.
CrioCleanShutdownFile = "/var/lib/crio/clean.shutdown"
)
5 changes: 5 additions & 0 deletions pkg/config/template.go
@@ -62,6 +62,11 @@ version_file = "{{ .VersionFile }}"
# only happen when CRI-O has been upgraded
version_file_persist = "{{ .VersionFilePersist }}"

# Location for CRI-O to lay down the clean shutdown file.
# It is used to check whether crio had time to sync before shutting down.
# If not found, crio wipe will clear the storage directory.
clean_shutdown_file = "{{ .CleanShutdownFile }}"
Review comment (Member):
> Do we want to be able to disable this feature if clean_shutdown_file = "" or commented out?

Reply (Member Author):
> I have added this.

# The crio.api table contains settings for the kubelet/gRPC interface.
[crio.api]

34 changes: 33 additions & 1 deletion server/server.go
@@ -24,6 +24,7 @@ import (
"github.com/cri-o/cri-o/internal/storage"
libconfig "github.com/cri-o/cri-o/pkg/config"
"github.com/cri-o/cri-o/server/metrics"
"github.com/cri-o/cri-o/utils"
"github.com/fsnotify/fsnotify"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -270,7 +271,38 @@ func (s *Server) Shutdown(ctx context.Context) error {
// crio.service restart!!!
s.cleanupSandboxesOnShutdown(ctx)

return s.ContainerServer.Shutdown()
if err := s.ContainerServer.Shutdown(); err != nil {
return err
}

// first, make sure we sync all storage changes
if err := utils.Sync(s.Store().GraphRoot()); err != nil {
return errors.Wrapf(err, "failed to sync graph root after shutting down")
}

if s.config.CleanShutdownFile != "" {
// then, we write the CleanShutdownFile
// we do this after the sync, to ensure ordering.
// Otherwise, the CleanShutdownFile could reach disk before the storage
// changes do, and a corrupted storage would be mistaken for a clean one.
f, err := os.Create(s.config.CleanShutdownFile)
if err != nil {
return errors.Wrapf(err, "failed to write file to indicate a clean shutdown")
}
f.Close()

// finally, attempt to sync the newly created file to disk.
// It's still possible we crash after Create but before this Sync,
// which will lead us to think storage wasn't synced.
// However, that is much less likely than if we skipped this second Sync,
// and far less risky than not syncing after the Create at all.
if err := utils.SyncParent(s.config.CleanShutdownFile); err != nil {
return errors.Wrapf(err, "failed to sync clean shutdown file")
}
}

return nil
}

// configureMaxThreads sets the Go runtime max threads threshold
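The ordering enforced above — sync the graph root, create the marker file, then sync the marker's parent directory — is essentially a "durable marker file" pattern. A compact restatement as a hypothetical helper, under the same assumptions about the `utils` helpers as in the earlier sketch (not part of the diff, which inlines this in `Shutdown`):

```go
package server

import (
	"os"

	"github.com/cri-o/cri-o/utils"
)

// markCleanShutdown is a hypothetical condensation of the new Shutdown
// logic: storage is synced before the marker is created, and the
// marker's parent directory is synced afterwards so the new directory
// entry itself survives a crash.
func markCleanShutdown(graphRoot, markerPath string) error {
	if err := utils.Sync(graphRoot); err != nil {
		return err
	}
	f, err := os.Create(markerPath)
	if err != nil {
		return err
	}
	f.Close()
	return utils.SyncParent(markerPath)
}
```
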
6 changes: 6 additions & 0 deletions server/server_test.go
@@ -2,6 +2,7 @@ package server_test

import (
"context"
"os"

cstorage "github.com/containers/storage"
"github.com/cri-o/cri-o/server"
@@ -251,13 +252,18 @@ var _ = t.Describe("Server", func() {
// Given
gomock.InOrder(
storeMock.EXPECT().Shutdown(gomock.Any()).Return(nil, nil),
storeMock.EXPECT().GraphRoot().Return(emptyDir),
)

// When
err := sut.Shutdown(context.Background())

// Then
Expect(err).To(BeNil())

// expect cri-o to have created the clean shutdown file
_, err = os.Stat(sut.Config().CleanShutdownFile)
Expect(err).To(BeNil())
})
})

1 change: 1 addition & 0 deletions server/suite_test.go
@@ -143,6 +143,7 @@ var beforeEach = func() {
serverConfig.ContainerAttachSocketDir = testPath
serverConfig.ContainerExitsDir = path.Join(testPath, "exits")
serverConfig.LogDir = path.Join(testPath, "log")
serverConfig.CleanShutdownFile = path.Join(testPath, "clean.shutdown")

// We want a directory that is guaranteed to exist, but it must
// be empty so we don't erroneously load anything and make tests
73 changes: 72 additions & 1 deletion test/crio-wipe.bats
@@ -10,6 +10,7 @@ function setup() {
setup_test
export CONTAINER_VERSION_FILE="$TESTDIR"/version.tmp
export CONTAINER_VERSION_FILE_PERSIST="$TESTDIR"/version-persist.tmp
export CONTAINER_CLEAN_SHUTDOWN_FILE="$TESTDIR"/clean-shutdown.tmp
}

function run_podman_with_args() {
@@ -28,7 +29,7 @@ function teardown() {

# run crio_wipe calls crio_wipe and tests it succeeded
function run_crio_wipe() {
run $CRIO_BINARY_PATH --config "$CRIO_CONFIG" wipe
run $CRIO_BINARY_PATH --config "$CRIO_CONFIG" wipe "$@"
echo "$status"
echo "$output"
[ "$status" -eq 0 ]
@@ -134,3 +135,73 @@ function start_crio_with_stopped_pod() {
run_podman_with_args ps -a
[[ "$output" =~ "test" ]]
}

@test "don't clear everything when not asked to check shutdown" {
start_crio_with_stopped_pod
stop_crio_no_clean

rm "$CONTAINER_CLEAN_SHUTDOWN_FILE"

export CONTAINER_CLEAN_SHUTDOWN_FILE=""
run_crio_wipe

start_crio_no_setup

test_crio_did_not_wipe_containers
test_crio_did_not_wipe_images
}

@test "do clear everything when shutdown file not found" {
start_crio_with_stopped_pod
stop_crio_no_clean

rm "$CONTAINER_CLEAN_SHUTDOWN_FILE"

run_crio_wipe

start_crio_no_setup

test_crio_wiped_containers
test_crio_wiped_images
}

@test "do clear podman containers when shutdown file not found" {
if [[ -z "$PODMAN_BINARY" ]]; then
skip "Podman not installed"
fi

start_crio_with_stopped_pod
stop_crio_no_clean

run_podman_with_args run --name test quay.io/crio/busybox:latest ls
# all podman containers would be stopped after a reboot
run_podman_with_args stop -a

rm "$CONTAINER_CLEAN_SHUTDOWN_FILE"

run_crio_wipe

run_podman_with_args ps -a
[[ ! "$output" =~ "test" ]]
}

@test "fail to clear podman containers when shutdown file not found but container still running" {
if [[ -z "$PODMAN_BINARY" ]]; then
skip "Podman not installed"
fi

start_crio_with_stopped_pod
stop_crio_no_clean

# all podman containers would be stopped after a reboot
run_podman_with_args run --name test -d quay.io/crio/busybox:latest top

rm "$CONTAINER_CLEAN_SHUTDOWN_FILE"

run $CRIO_BINARY_PATH --config "$CRIO_CONFIG" wipe
echo "$status"
echo "$output"
[ "$status" -ne 0 ]
}
2 changes: 2 additions & 0 deletions test/helpers.bash
@@ -159,6 +159,8 @@ fi
function setup_test() {
TESTDIR=$(mktemp -d)
RANDOM_CNI_NETWORK=${TESTDIR: -10}
NAMESPACES_DIR=$TESTDIR/ns
mkdir "$NAMESPACES_DIR"

# Setup default hooks dir
HOOKSDIR=$TESTDIR/hooks