Thanks to visit codestin.com
Credit goes to github.com

Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
d0b0250
test: add runtime() function
haircommander Feb 22, 2021
051fd17
resourcestore: introduce ResourceCleaner
haircommander Apr 5, 2021
55c4e5f
server: use background context for network stop
haircommander Apr 26, 2021
0301c9f
Cleanup pod network on sandbox removal
saschagrunert Apr 15, 2021
c9cd7e4
server: reuse container removal code for infra
haircommander Apr 15, 2021
90f11dd
storage: remove RemovePodSandbox function
haircommander Apr 15, 2021
b9a8ea6
server: break up stop/remove all functions with internal helpers
haircommander Apr 15, 2021
bcbc7b8
config: add InternalWipe
haircommander Apr 15, 2021
5008d0f
crio wipe: add support for internal_wipe
haircommander Apr 15, 2021
a35ef0a
server: add support for internal_wipe
haircommander Apr 15, 2021
5e63d0a
test: add test for internal_wipe
haircommander Apr 15, 2021
21ffc59
Add resource cleaner retry functionality
saschagrunert Apr 16, 2021
2c9c9f3
server: move newPodNetwork to a more logical place
haircommander Apr 16, 2021
5104e67
server: get hooks after we've checked if a sandbox is already stopped
haircommander Apr 16, 2021
be32274
test: add test for delayed cleanup of network on restart
haircommander Apr 16, 2021
b9fc6fb
resourcestore: run cleanup in parallel
haircommander Apr 26, 2021
8d7c0a2
server: group namespace cleanup with network stop
haircommander Apr 21, 2021
8b1da95
server: don't unconditionally fail on sandbox cleanup
haircommander Apr 21, 2021
b8e2a46
sandbox: fix race with cleanup
haircommander Apr 26, 2021
7c035f0
use more ContainerServer.StopContainer
haircommander Apr 26, 2021
2442b66
sandbox remove: unmount shm before removing infra container
haircommander Apr 26, 2021
77274ab
server: properly remove sandbox network on failed restore
haircommander May 13, 2021
0452fc6
move internal wipe to only wipe images
haircommander May 13, 2021
af9ab4a
ignore storage.ErrNotAContainer
haircommander May 13, 2021
df794ef
test: adapt crio wipe tests to handle new behavior
haircommander May 14, 2021
51883ae
test: fix crio-wipe test
haircommander Jun 25, 2021
f703370
container_server: fix nsJoining
haircommander Jun 17, 2021
aebe2b0
storage: succeed in DeleteContainer if container is unknown
haircommander Jun 2, 2021
91378ef
server: don't repeatedly error with no such id
haircommander Jun 16, 2021
c29b65e
server: reduce log verbosity on restore
haircommander May 20, 2021
1a8b820
server: call CNI del in separate routine in restore
haircommander May 20, 2021
25815d3
fix lint by removing dead code
haircommander Jun 17, 2021
6931816
lib: unconditionally attempt to restore namespaces
haircommander Jun 22, 2021
0007aff
server: fix another repeated no such id error
haircommander Jul 29, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions cmd/crio/wipe.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,15 @@ func crioWipe(c *cli.Context) error {
}
}

// If crio is configured to wipe internally (and `--force` wasn't set)
// the `crio wipe` command has nothing left to do,
// as the remaining work will be done on server startup.
if config.InternalWipe && !c.IsSet("force") {
return nil
}

logrus.Infof("Internal wipe not set, meaning crio wipe will wipe. In the future, all wipes after reboot will happen when starting the crio server.")

// if we should not wipe, exit with no error
if !shouldWipeContainers {
// we should not wipe images without wiping containers
Expand Down
1 change: 1 addition & 0 deletions completions/bash/crio
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ h
--hooks-dir
--image-volumes
--insecure-registry
--internal-wipe
--listen
--log
--log-dir
Expand Down
1 change: 1 addition & 0 deletions completions/fish/crio.fish
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ complete -c crio -n '__fish_crio_no_subcommand' -f -l insecure-registry -r -d 'E
be enabled for testing purposes**. For increased security, users should add
their CA to their system\'s list of trusted CAs instead of using
\'--insecure-registry\'.'
complete -c crio -n '__fish_crio_no_subcommand' -f -l internal-wipe -d 'Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations.'
complete -c crio -n '__fish_crio_no_subcommand' -l listen -r -d 'Path to the CRI-O socket'
complete -c crio -n '__fish_crio_no_subcommand' -l log -r -d 'Set the log file path where internal debug information is written'
complete -c crio -n '__fish_crio_no_subcommand' -l log-dir -r -d 'Default log directory where all logs will go unless directly specified by the kubelet'
Expand Down
2 changes: 1 addition & 1 deletion completions/zsh/_crio
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ it later with **--config**. Global options will modify the output.' 'version:dis
_describe 'commands' cmds

local -a opts
opts=('--additional-devices' '--apparmor-profile' '--big-files-temporary-dir' '--bind-mount-prefix' '--cgroup-manager' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' '--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--drop-infra-ctr' '--enable-metrics' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--insecure-registry' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-ns-lifecycle' '--metrics-port' '--metrics-socket' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtimes' '--seccomp-profile' '--seccomp-use-default-when-empty' '--selinux' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-idle-timeout' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version')
opts=('--additional-devices' '--apparmor-profile' '--big-files-temporary-dir' '--bind-mount-prefix' '--cgroup-manager' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' '--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--drop-infra-ctr' '--enable-metrics' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--insecure-registry' '--internal-wipe' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-ns-lifecycle' '--metrics-port' '--metrics-socket' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtimes' '--seccomp-profile' '--seccomp-use-default-when-empty' '--selinux' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-idle-timeout' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version')
_describe 'global options' opts

return
Expand Down
5 changes: 4 additions & 1 deletion docs/crio.8.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ crio
[--hooks-dir]=[value]
[--image-volumes]=[value]
[--insecure-registry]=[value]
[--internal-wipe]
[--listen]=[value]
[--log-dir]=[value]
[--log-filter]=[value]
Expand Down Expand Up @@ -222,6 +223,8 @@ crio [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
their CA to their system's list of trusted CAs instead of using
'--insecure-registry'. (default: [])

**--internal-wipe**: Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations.

**--listen**="": Path to the CRI-O socket (default: /var/run/crio/crio.sock)

**--log**="": Set the log file path where internal debug information is written
Expand Down Expand Up @@ -268,7 +271,7 @@ crio [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]

**--root, -r**="": The CRI-O root directory (default: /var/lib/containers/storage)

**--runroot**="": The CRI-O state directory (default: /var/run/containers/storage)
**--runroot**="": The CRI-O state directory (default: /run/containers/storage)

**--runtimes**="": OCI runtimes, format is runtime_name:runtime_path:runtime_root:runtime_type (default: [])

Expand Down
4 changes: 4 additions & 0 deletions docs/crio.conf.5.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,10 @@ CRI-O reads its storage defaults from the containers-storage.conf(5) file locate
It is used to check if crio wipe should wipe images, which should
only happen when CRI-O has been upgraded

**internal_wipe**=false
Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts.
If set to false, one must run `crio wipe` to wipe the containers and images in these situations.

## CRIO.API TABLE
The `crio.api` table contains settings for the kubelet/gRPC interface.

Expand Down
9 changes: 9 additions & 0 deletions internal/criocli/criocli.go
Original file line number Diff line number Diff line change
Expand Up @@ -281,6 +281,9 @@ func mergeConfig(config *libconfig.Config, ctx *cli.Context) error {
if ctx.IsSet("version-file-persist") {
config.VersionFilePersist = ctx.String("version-file-persist")
}
if ctx.IsSet("internal-wipe") {
config.InternalWipe = ctx.Bool("internal-wipe")
}
if ctx.IsSet("enable-metrics") {
config.EnableMetrics = ctx.Bool("enable-metrics")
}
Expand Down Expand Up @@ -802,6 +805,12 @@ func getCrioFlags(defConf *libconfig.Config) []cli.Flag {
EnvVars: []string{"CONTAINER_VERSION_FILE_PERSIST"},
TakesFile: true,
},
&cli.BoolFlag{
Name: "internal-wipe",
Usage: "Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations.",
Value: defConf.InternalWipe,
EnvVars: []string{"CONTAINER_INTERNAL_WIPE"},
},
}
}

Expand Down
107 changes: 38 additions & 69 deletions internal/lib/container_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -145,23 +145,23 @@ func New(ctx context.Context, configIface libconfig.Iface) (*ContainerServer, er
}

// LoadSandbox loads a sandbox from the disk into the sandbox store
func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
func (c *ContainerServer) LoadSandbox(id string) (sb *sandbox.Sandbox, retErr error) {
config, err := c.store.FromContainerDirectory(id, "config.json")
if err != nil {
return err
return nil, err
}
var m rspec.Spec
if err := json.Unmarshal(config, &m); err != nil {
return errors.Wrap(err, "error unmarshalling sandbox spec")
return nil, errors.Wrap(err, "error unmarshalling sandbox spec")
}
labels := make(map[string]string)
if err := json.Unmarshal([]byte(m.Annotations[annotations.Labels]), &labels); err != nil {
return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Labels)
return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Labels)
}
name := m.Annotations[annotations.Name]
name, err = c.ReservePodName(id, name)
if err != nil {
return err
return nil, err
}
defer func() {
if retErr != nil {
Expand All @@ -170,7 +170,7 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
}()
var metadata pb.PodSandboxMetadata
if err := json.Unmarshal([]byte(m.Annotations[annotations.Metadata]), &metadata); err != nil {
return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Metadata)
return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Metadata)
}

processLabel := m.Process.SelinuxLabel
Expand All @@ -180,65 +180,56 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {

kubeAnnotations := make(map[string]string)
if err := json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Annotations)
return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Annotations)
}

portMappings := []*hostport.PortMapping{}
if err := json.Unmarshal([]byte(m.Annotations[annotations.PortMappings]), &portMappings); err != nil {
return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.PortMappings)
return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.PortMappings)
}

privileged := isTrue(m.Annotations[annotations.PrivilegedRuntime])
hostNetwork := isTrue(m.Annotations[annotations.HostNetwork])
nsOpts := pb.NamespaceOption{}
if err := json.Unmarshal([]byte(m.Annotations[annotations.NamespaceOptions]), &nsOpts); err != nil {
return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.NamespaceOptions)
return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.NamespaceOptions)
}

created, err := time.Parse(time.RFC3339Nano, m.Annotations[annotations.Created])
if err != nil {
return errors.Wrap(err, "parsing created timestamp annotation")
return nil, errors.Wrap(err, "parsing created timestamp annotation")
}

sb, err := sandbox.New(id, m.Annotations[annotations.Namespace], name, m.Annotations[annotations.KubeName], filepath.Dir(m.Annotations[annotations.LogPath]), labels, kubeAnnotations, processLabel, mountLabel, &metadata, m.Annotations[annotations.ShmPath], m.Annotations[annotations.CgroupParent], privileged, m.Annotations[annotations.RuntimeHandler], m.Annotations[annotations.ResolvPath], m.Annotations[annotations.HostName], portMappings, hostNetwork, created)
sb, err = sandbox.New(id, m.Annotations[annotations.Namespace], name, m.Annotations[annotations.KubeName], filepath.Dir(m.Annotations[annotations.LogPath]), labels, kubeAnnotations, processLabel, mountLabel, &metadata, m.Annotations[annotations.ShmPath], m.Annotations[annotations.CgroupParent], privileged, m.Annotations[annotations.RuntimeHandler], m.Annotations[annotations.ResolvPath], m.Annotations[annotations.HostName], portMappings, hostNetwork, created)
if err != nil {
return err
return nil, err
}
sb.AddHostnamePath(m.Annotations[annotations.HostnamePath])
sb.SetSeccompProfilePath(spp)
sb.SetNamespaceOptions(&nsOpts)

// We add an NS only if we can load a permanent one.
// Otherwise, the sandbox will live in the host namespace.
if c.config.ManageNSLifecycle {
netNsPath, err := configNsPath(&m, rspec.NetworkNamespace)
if err == nil {
if nsErr := sb.NetNsJoin(netNsPath); nsErr != nil {
return nsErr
}
}
ipcNsPath, err := configNsPath(&m, rspec.IPCNamespace)
if err == nil {
if nsErr := sb.IpcNsJoin(ipcNsPath); nsErr != nil {
return nsErr
}
}
utsNsPath, err := configNsPath(&m, rspec.UTSNamespace)
namespacesToJoin := []struct {
rspecNS rspec.LinuxNamespaceType
joinFunc func(string) error
}{
{rspecNS: rspec.NetworkNamespace, joinFunc: sb.NetNsJoin},
{rspecNS: rspec.IPCNamespace, joinFunc: sb.IpcNsJoin},
{rspecNS: rspec.UTSNamespace, joinFunc: sb.UtsNsJoin},
{rspecNS: rspec.UserNamespace, joinFunc: sb.UserNsJoin},
}
for _, namespaceToJoin := range namespacesToJoin {
path, err := configNsPath(&m, namespaceToJoin.rspecNS)
if err == nil {
if nsErr := sb.UtsNsJoin(utsNsPath); nsErr != nil {
return nsErr
}
}
userNsPath, err := configNsPath(&m, rspec.UserNamespace)
if err == nil {
if nsErr := sb.UserNsJoin(userNsPath); nsErr != nil {
return nsErr
if nsErr := namespaceToJoin.joinFunc(path); nsErr != nil {
return sb, nsErr
}
}
}

if err := c.AddSandbox(sb); err != nil {
return err
return sb, err
}

defer func() {
Expand All @@ -251,19 +242,19 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {

sandboxPath, err := c.store.ContainerRunDirectory(id)
if err != nil {
return err
return sb, err
}

sandboxDir, err := c.store.ContainerDirectory(id)
if err != nil {
return err
return sb, err
}

cID := m.Annotations[annotations.ContainerID]

cname, err := c.ReserveContainerName(cID, m.Annotations[annotations.ContainerName])
if err != nil {
return err
return sb, err
}
defer func() {
if retErr != nil {
Expand All @@ -283,15 +274,15 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
if !wasSpoofed {
scontainer, err = oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], labels, m.Annotations, kubeAnnotations, m.Annotations[annotations.Image], "", "", nil, id, false, false, false, sb.RuntimeHandler(), sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
if err != nil {
return err
return sb, err
}
scontainer.SetSpec(&m)
scontainer.SetMountPoint(m.Annotations[annotations.MountPoint])

if m.Annotations[annotations.Volumes] != "" {
containerVolumes := []oci.ContainerVolume{}
if err = json.Unmarshal([]byte(m.Annotations[annotations.Volumes]), &containerVolumes); err != nil {
return fmt.Errorf("failed to unmarshal container volumes: %v", err)
return sb, fmt.Errorf("failed to unmarshal container volumes: %v", err)
}
for _, cv := range containerVolumes {
scontainer.AddVolume(cv)
Expand All @@ -302,50 +293,28 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
}

if err := c.ContainerStateFromDisk(scontainer); err != nil {
return fmt.Errorf("error reading sandbox state from disk %q: %v", scontainer.ID(), err)
return sb, fmt.Errorf("error reading sandbox state from disk %q: %v", scontainer.ID(), err)
}

// We write back the state because it is possible that crio did not have a chance to
// read the exit file and persist exit code into the state on reboot.
if err := c.ContainerStateToDisk(scontainer); err != nil {
return fmt.Errorf("failed to write container %q state to disk: %v", scontainer.ID(), err)
return sb, fmt.Errorf("failed to write container %q state to disk: %v", scontainer.ID(), err)
}

if err := sb.SetInfraContainer(scontainer); err != nil {
return err
}

// We add an NS only if we can load a permanent one.
// Otherwise, the sandbox will live in the host namespace.
if c.config.ManageNSLifecycle || wasSpoofed {
namespacesToJoin := []struct {
rspecNS rspec.LinuxNamespaceType
joinFunc func(string) error
}{
{rspecNS: rspec.NetworkNamespace, joinFunc: sb.NetNsJoin},
{rspecNS: rspec.IPCNamespace, joinFunc: sb.IpcNsJoin},
{rspecNS: rspec.UTSNamespace, joinFunc: sb.UtsNsJoin},
{rspecNS: rspec.UserNamespace, joinFunc: sb.UserNsJoin},
}
for _, namespaceToJoin := range namespacesToJoin {
path, err := configNsPath(&m, namespaceToJoin.rspecNS)
if err == nil {
if nsErr := namespaceToJoin.joinFunc(path); err != nil {
return nsErr
}
}
}
return sb, err
}

sb.SetCreated()
if err := label.ReserveLabel(processLabel); err != nil {
return err
return sb, err
}

sb.RestoreStopped()

if err := c.ctrIDIndex.Add(scontainer.ID()); err != nil {
return err
return sb, err
}
defer func() {
if retErr != nil {
Expand All @@ -355,9 +324,9 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
}
}()
if err := c.podIDIndex.Add(id); err != nil {
return err
return sb, err
}
return nil
return sb, nil
}

func configNsPath(spec *rspec.Spec, nsType rspec.LinuxNamespaceType) (string, error) {
Expand Down
Loading