Merged

Changes from all commits (32 commits):
630f276
test: add runtime() function
haircommander Feb 22, 2021
176cd4a
resourcestore: introduce ResourceCleaner
haircommander Apr 5, 2021
2cb0840
server: use background context for network stop
haircommander Apr 26, 2021
7156bdc
Cleanup pod network on sandbox removal
saschagrunert Apr 15, 2021
2e21735
server: reuse container removal code for infra
haircommander Apr 15, 2021
621d296
storage: remove RemovePodSandbox function
haircommander Apr 15, 2021
3194c02
server: break up stop/remove all functions with internal helpers
haircommander Apr 15, 2021
286c407
config: add InternalWipe
haircommander Apr 15, 2021
5e6e0df
crio wipe: add support for internal_wipe
haircommander Apr 15, 2021
4f481e5
server: add support for internal_wipe
haircommander Apr 15, 2021
4674b87
test: add test for internal_wipe
haircommander Apr 15, 2021
d5465df
Add resource cleaner retry functionality
saschagrunert Apr 16, 2021
df61d59
server: move newPodNetwork to a more logical place
haircommander Apr 16, 2021
4863486
server: get hooks after we've checked if a sandbox is already stopped
haircommander Apr 16, 2021
8af122e
test: add test for delayed cleanup of network on restart
haircommander Apr 16, 2021
2048b2f
resourcestore: run cleanup in parallel
haircommander Apr 26, 2021
631c535
server: group namespace cleanup with network stop
haircommander Apr 21, 2021
7b21821
server: don't unconditionally fail on sandbox cleanup
haircommander Apr 21, 2021
62cbb3f
sandbox: fix race with cleanup
haircommander Apr 26, 2021
2c302c8
use more ContainerServer.StopContainer
haircommander Apr 26, 2021
2b0cac3
sandbox remove: unmount shm before removing infra container
haircommander Apr 26, 2021
fd99820
server: properly remove sandbox network on failed restore
haircommander May 13, 2021
dacd69a
move internal wipe to only wipe images
haircommander May 13, 2021
a961c0d
ignore storage.ErrNotAContainer
haircommander May 13, 2021
b46e733
test: adapt crio wipe tests to handle new behavior
haircommander May 14, 2021
433d87a
container_server: fix nsJoining
haircommander Jun 17, 2021
a611724
storage: succeed in DeleteContainer if container is unknown
haircommander Jun 2, 2021
d294d7e
server: don't repeatedly error with no such id
haircommander Jun 16, 2021
b9a58ad
server: reduce log verbosity on restore
haircommander May 20, 2021
40d3bcc
server: call CNI del in separate routine in restore
haircommander May 20, 2021
7d6a05a
fix lint by removing dead code
haircommander Jun 17, 2021
f65e248
lib: unconditionally attempt to restore namespaces
haircommander Jun 22, 2021
9 changes: 9 additions & 0 deletions cmd/crio/wipe.go
@@ -51,6 +51,15 @@ func crioWipe(c *cli.Context) error {
 		}
 	}
 
+	// If crio is configured to wipe internally (and `--force` wasn't set)
+	// the `crio wipe` command has nothing left to do,
+	// as the remaining work will be done on server startup.
+	if config.InternalWipe && !c.IsSet("force") {
+		return nil
+	}
+
+	logrus.Infof("Internal wipe not set, meaning crio wipe will wipe. In the future, all wipes after reboot will happen when starting the crio server.")
+
 	// if we should not wipe, exit with no error
 	if !shouldWipeContainers {
 		// we should not wipe images without wiping containers
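The gating in the hunk above is simple enough to restate on its own. Below is a minimal, self-contained sketch of the decision (simplified names, not the actual CRI-O code): with `internal_wipe` enabled and no `--force`, the external `crio wipe` command defers all wiping to server startup.

```go
package main

import "fmt"

type config struct{ InternalWipe bool }

// shouldExternallyWipe mirrors the early return above: when the server is
// configured to wipe internally and --force wasn't passed, the external
// command has nothing left to do.
func shouldExternallyWipe(cfg config, forceSet bool) bool {
	return !cfg.InternalWipe || forceSet
}

func main() {
	fmt.Println(shouldExternallyWipe(config{InternalWipe: true}, false))  // false: server wipes on startup
	fmt.Println(shouldExternallyWipe(config{InternalWipe: true}, true))   // true: --force still wipes externally
	fmt.Println(shouldExternallyWipe(config{InternalWipe: false}, false)) // true: legacy external wipe
}
```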
1 change: 1 addition & 0 deletions completions/bash/crio
@@ -47,6 +47,7 @@ h
 --image-volumes
 --infra-ctr-cpuset
 --insecure-registry
+--internal-wipe
 --irqbalance-config-file
 --listen
 --log
1 change: 1 addition & 0 deletions completions/fish/crio.fish
@@ -85,6 +85,7 @@ complete -c crio -n '__fish_crio_no_subcommand' -f -l insecure-registry -r -d 'E
 be enabled for testing purposes**. For increased security, users should add
 their CA to their system\'s list of trusted CAs instead of using
 \'--insecure-registry\'.'
+complete -c crio -n '__fish_crio_no_subcommand' -f -l internal-wipe -d 'Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations.'
 complete -c crio -n '__fish_crio_no_subcommand' -f -l irqbalance-config-file -r -d 'The irqbalance service config file which is used by CRI-O.'
 complete -c crio -n '__fish_crio_no_subcommand' -l listen -r -d 'Path to the CRI-O socket'
 complete -c crio -n '__fish_crio_no_subcommand' -l log -r -d 'Set the log file path where internal debug information is written'
2 changes: 1 addition & 1 deletion completions/zsh/_crio
@@ -7,7 +7,7 @@ it later with **--config**. Global options will modify the output.' 'version:dis
 _describe 'commands' cmds
 
 local -a opts
-opts=('--additional-devices' '--apparmor-profile' '--big-files-temporary-dir' '--bind-mount-prefix' '--cgroup-manager' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' '--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--drop-infra-ctr' '--enable-metrics' '--enable-profile-unix-socket' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--infra-ctr-cpuset' '--insecure-registry' '--irqbalance-config-file' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-ns-lifecycle' '--metrics-port' '--metrics-socket' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtimes' '--seccomp-profile' '--seccomp-use-default-when-empty' '--selinux' '--separate-pull-cgroup' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-idle-timeout' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version')
+opts=('--additional-devices' '--apparmor-profile' '--big-files-temporary-dir' '--bind-mount-prefix' '--cgroup-manager' '--cni-config-dir' '--cni-default-network' '--cni-plugin-dir' '--config' '--config-dir' '--conmon' '--conmon-cgroup' '--conmon-env' '--container-attach-socket-dir' '--container-exits-dir' '--ctr-stop-timeout' '--decryption-keys-path' '--default-capabilities' '--default-env' '--default-mounts-file' '--default-runtime' '--default-sysctls' '--default-transport' '--default-ulimits' '--drop-infra-ctr' '--enable-metrics' '--enable-profile-unix-socket' '--gid-mappings' '--global-auth-file' '--grpc-max-recv-msg-size' '--grpc-max-send-msg-size' '--hooks-dir' '--image-volumes' '--infra-ctr-cpuset' '--insecure-registry' '--internal-wipe' '--irqbalance-config-file' '--listen' '--log' '--log-dir' '--log-filter' '--log-format' '--log-journald' '--log-level' '--log-size-max' '--manage-ns-lifecycle' '--metrics-port' '--metrics-socket' '--namespaces-dir' '--no-pivot' '--pause-command' '--pause-image' '--pause-image-auth-file' '--pids-limit' '--pinns-path' '--profile' '--profile-port' '--read-only' '--registries-conf' '--registry' '--root' '--runroot' '--runtimes' '--seccomp-profile' '--seccomp-use-default-when-empty' '--selinux' '--separate-pull-cgroup' '--signature-policy' '--storage-driver' '--storage-opt' '--stream-address' '--stream-enable-tls' '--stream-idle-timeout' '--stream-port' '--stream-tls-ca' '--stream-tls-cert' '--stream-tls-key' '--uid-mappings' '--version-file' '--version-file-persist' '--help' '--version')
 _describe 'global options' opts
 
 return
3 changes: 3 additions & 0 deletions docs/crio.8.md
@@ -47,6 +47,7 @@ crio
 [--image-volumes]=[value]
 [--infra-ctr-cpuset]=[value]
 [--insecure-registry]=[value]
+[--internal-wipe]
 [--irqbalance-config-file]=[value]
 [--listen]=[value]
 [--log-dir]=[value]
@@ -230,6 +231,8 @@ crio [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
 their CA to their system's list of trusted CAs instead of using
 '--insecure-registry'. (default: [])
 
+**--internal-wipe**: Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations.
+
 **--irqbalance-config-file**="": The irqbalance service config file which is used by CRI-O. (default: /etc/sysconfig/irqbalance)
 
 **--listen**="": Path to the CRI-O socket (default: /var/run/crio/crio.sock)
4 changes: 4 additions & 0 deletions docs/crio.conf.5.md
@@ -54,6 +54,10 @@ CRI-O reads its storage defaults from the containers-storage.conf(5) file locate
 It is used to check if crio wipe should wipe images, which should
 only happen when CRI-O has been upgraded
 
+**internal_wipe**=false
+  Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts.
+  If set to false, one must run `crio wipe` to wipe the containers and images in these situations.
+
 ## CRIO.API TABLE
 The `crio.api` table contains settings for the kubelet/gRPC interface.
 
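For context, the reboot/upgrade distinction the man page draws can be sketched in a few lines. This is an illustrative reconstruction of the documented behavior, not the exact implementation: the paths and comparison logic are assumptions based on the defaults for `version_file` (on tmpfs, so it vanishes after a reboot) and `version_file_persist` (persistent, so it only changes across an upgrade).

```go
package main

import (
	"fmt"
	"os"
)

// shouldWipe sketches the documented checks: a missing tmpfs version file
// implies the node rebooted (wipe containers); a persistent version file
// recording a different version implies an upgrade (also wipe images).
func shouldWipe(tmpfsVersionFile, persistVersionFile, currentVersion string) (containers, images bool) {
	if _, err := os.Stat(tmpfsVersionFile); os.IsNotExist(err) {
		containers = true
	}
	old, err := os.ReadFile(persistVersionFile)
	if err == nil && string(old) != currentVersion {
		images = true
	}
	return containers, images
}

func main() {
	// Paths mirror the documented defaults; adjust for a real deployment.
	c, i := shouldWipe("/var/run/crio/version", "/var/lib/crio/version", "1.21.0")
	fmt.Println("wipe containers:", c, "wipe images:", i)
}
```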
9 changes: 9 additions & 0 deletions internal/criocli/criocli.go
@@ -284,6 +284,9 @@ func mergeConfig(config *libconfig.Config, ctx *cli.Context) error {
 	if ctx.IsSet("version-file-persist") {
 		config.VersionFilePersist = ctx.String("version-file-persist")
 	}
+	if ctx.IsSet("internal-wipe") {
+		config.InternalWipe = ctx.Bool("internal-wipe")
+	}
 	if ctx.IsSet("enable-metrics") {
 		config.EnableMetrics = ctx.Bool("enable-metrics")
 	}
@@ -827,6 +830,12 @@ func getCrioFlags(defConf *libconfig.Config) []cli.Flag {
 			EnvVars:   []string{"CONTAINER_VERSION_FILE_PERSIST"},
 			TakesFile: true,
 		},
+		&cli.BoolFlag{
+			Name:    "internal-wipe",
+			Usage:   "Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations.",
+			Value:   defConf.InternalWipe,
+			EnvVars: []string{"CONTAINER_INTERNAL_WIPE"},
+		},
 		&cli.StringFlag{
 			Name:  "infra-ctr-cpuset",
 			Usage: "CPU set to run infra containers, if not specified CRI-O will use all online CPUs to run infra containers (default: '').",
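For readers unfamiliar with the flag plumbing: CRI-O uses urfave/cli, so the `BoolFlag` above can be set from the command line or from `CONTAINER_INTERNAL_WIPE`, and the `ctx.IsSet` check in `mergeConfig` ensures the value only overrides the config file when it was explicitly provided. A minimal, self-contained sketch of that pattern (not CRI-O's actual wiring) is shown below.

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	internalWipe := false // stand-in for the config-file default
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:    "internal-wipe",
				Value:   internalWipe, // default shown in --help
				EnvVars: []string{"CONTAINER_INTERNAL_WIPE"},
			},
		},
		Action: func(ctx *cli.Context) error {
			// Only override the config-file value when the flag (or its
			// environment variable) was explicitly set.
			if ctx.IsSet("internal-wipe") {
				internalWipe = ctx.Bool("internal-wipe")
			}
			fmt.Println("internal wipe:", internalWipe)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```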
107 changes: 38 additions & 69 deletions internal/lib/container_server.go
@@ -144,23 +144,23 @@ func New(ctx context.Context, configIface libconfig.Iface) (*ContainerServer, er
 }
 
 // LoadSandbox loads a sandbox from the disk into the sandbox store
-func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
+func (c *ContainerServer) LoadSandbox(id string) (sb *sandbox.Sandbox, retErr error) {
 	config, err := c.store.FromContainerDirectory(id, "config.json")
 	if err != nil {
-		return err
+		return nil, err
 	}
 	var m rspec.Spec
 	if err := json.Unmarshal(config, &m); err != nil {
-		return errors.Wrap(err, "error unmarshalling sandbox spec")
+		return nil, errors.Wrap(err, "error unmarshalling sandbox spec")
 	}
 	labels := make(map[string]string)
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.Labels]), &labels); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Labels)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Labels)
 	}
 	name := m.Annotations[annotations.Name]
 	name, err = c.ReservePodName(id, name)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -169,7 +169,7 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
 	}()
 	var metadata sandbox.Metadata
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.Metadata]), &metadata); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Metadata)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Metadata)
 	}
 
 	processLabel := m.Process.SelinuxLabel
@@ -179,65 +179,56 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
 
 	kubeAnnotations := make(map[string]string)
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Annotations)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.Annotations)
 	}
 
 	portMappings := []*hostport.PortMapping{}
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.PortMappings]), &portMappings); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.PortMappings)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.PortMappings)
 	}
 
 	privileged := isTrue(m.Annotations[annotations.PrivilegedRuntime])
 	hostNetwork := isTrue(m.Annotations[annotations.HostNetwork])
 	nsOpts := sandbox.NamespaceOption{}
 	if err := json.Unmarshal([]byte(m.Annotations[annotations.NamespaceOptions]), &nsOpts); err != nil {
-		return errors.Wrapf(err, "error unmarshalling %s annotation", annotations.NamespaceOptions)
+		return nil, errors.Wrapf(err, "error unmarshalling %s annotation", annotations.NamespaceOptions)
 	}
 
 	created, err := time.Parse(time.RFC3339Nano, m.Annotations[annotations.Created])
 	if err != nil {
-		return errors.Wrap(err, "parsing created timestamp annotation")
+		return nil, errors.Wrap(err, "parsing created timestamp annotation")
 	}
 
-	sb, err := sandbox.New(id, m.Annotations[annotations.Namespace], name, m.Annotations[annotations.KubeName], filepath.Dir(m.Annotations[annotations.LogPath]), labels, kubeAnnotations, processLabel, mountLabel, &metadata, m.Annotations[annotations.ShmPath], m.Annotations[annotations.CgroupParent], privileged, m.Annotations[annotations.RuntimeHandler], m.Annotations[annotations.ResolvPath], m.Annotations[annotations.HostName], portMappings, hostNetwork, created, m.Annotations[crioann.UsernsModeAnnotation])
+	sb, err = sandbox.New(id, m.Annotations[annotations.Namespace], name, m.Annotations[annotations.KubeName], filepath.Dir(m.Annotations[annotations.LogPath]), labels, kubeAnnotations, processLabel, mountLabel, &metadata, m.Annotations[annotations.ShmPath], m.Annotations[annotations.CgroupParent], privileged, m.Annotations[annotations.RuntimeHandler], m.Annotations[annotations.ResolvPath], m.Annotations[annotations.HostName], portMappings, hostNetwork, created, m.Annotations[crioann.UsernsModeAnnotation])
 	if err != nil {
-		return err
+		return nil, err
 	}
 	sb.AddHostnamePath(m.Annotations[annotations.HostnamePath])
 	sb.SetSeccompProfilePath(spp)
 	sb.SetNamespaceOptions(&nsOpts)
 
 	// We add an NS only if we can load a permanent one.
 	// Otherwise, the sandbox will live in the host namespace.
-	if c.config.ManageNSLifecycle {
-		netNsPath, err := configNsPath(&m, rspec.NetworkNamespace)
-		if err == nil {
-			if nsErr := sb.NetNsJoin(netNsPath); nsErr != nil {
-				return nsErr
-			}
-		}
-		ipcNsPath, err := configNsPath(&m, rspec.IPCNamespace)
-		if err == nil {
-			if nsErr := sb.IpcNsJoin(ipcNsPath); nsErr != nil {
-				return nsErr
-			}
-		}
-		utsNsPath, err := configNsPath(&m, rspec.UTSNamespace)
+	namespacesToJoin := []struct {
+		rspecNS  rspec.LinuxNamespaceType
+		joinFunc func(string) error
+	}{
+		{rspecNS: rspec.NetworkNamespace, joinFunc: sb.NetNsJoin},
+		{rspecNS: rspec.IPCNamespace, joinFunc: sb.IpcNsJoin},
+		{rspecNS: rspec.UTSNamespace, joinFunc: sb.UtsNsJoin},
+		{rspecNS: rspec.UserNamespace, joinFunc: sb.UserNsJoin},
+	}
+	for _, namespaceToJoin := range namespacesToJoin {
+		path, err := configNsPath(&m, namespaceToJoin.rspecNS)
 		if err == nil {
-			if nsErr := sb.UtsNsJoin(utsNsPath); nsErr != nil {
-				return nsErr
-			}
-		}
-		userNsPath, err := configNsPath(&m, rspec.UserNamespace)
-		if err == nil {
-			if nsErr := sb.UserNsJoin(userNsPath); nsErr != nil {
-				return nsErr
+			if nsErr := namespaceToJoin.joinFunc(path); nsErr != nil {
+				return sb, nsErr
 			}
 		}
 	}
 
 	if err := c.AddSandbox(sb); err != nil {
-		return err
+		return sb, err
 	}
 
 	defer func() {
@@ -250,19 +241,19 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
 
 	sandboxPath, err := c.store.ContainerRunDirectory(id)
 	if err != nil {
-		return err
+		return sb, err
 	}
 
 	sandboxDir, err := c.store.ContainerDirectory(id)
 	if err != nil {
-		return err
+		return sb, err
 	}
 
 	cID := m.Annotations[annotations.ContainerID]
 
 	cname, err := c.ReserveContainerName(cID, m.Annotations[annotations.ContainerName])
 	if err != nil {
-		return err
+		return sb, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -282,15 +273,15 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
 	if !wasSpoofed {
 		scontainer, err = oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], labels, m.Annotations, kubeAnnotations, m.Annotations[annotations.Image], "", "", nil, id, false, false, false, sb.RuntimeHandler(), sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
 		if err != nil {
-			return err
+			return sb, err
 		}
 		scontainer.SetSpec(&m)
 		scontainer.SetMountPoint(m.Annotations[annotations.MountPoint])
 
 		if m.Annotations[annotations.Volumes] != "" {
 			containerVolumes := []oci.ContainerVolume{}
 			if err = json.Unmarshal([]byte(m.Annotations[annotations.Volumes]), &containerVolumes); err != nil {
-				return fmt.Errorf("failed to unmarshal container volumes: %v", err)
+				return sb, fmt.Errorf("failed to unmarshal container volumes: %v", err)
 			}
 			for _, cv := range containerVolumes {
 				scontainer.AddVolume(cv)
@@ -301,50 +292,28 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
 	}
 
 	if err := c.ContainerStateFromDisk(scontainer); err != nil {
-		return fmt.Errorf("error reading sandbox state from disk %q: %v", scontainer.ID(), err)
+		return sb, fmt.Errorf("error reading sandbox state from disk %q: %v", scontainer.ID(), err)
 	}
 
 	// We write back the state because it is possible that crio did not have a chance to
 	// read the exit file and persist exit code into the state on reboot.
 	if err := c.ContainerStateToDisk(scontainer); err != nil {
-		return fmt.Errorf("failed to write container %q state to disk: %v", scontainer.ID(), err)
+		return sb, fmt.Errorf("failed to write container %q state to disk: %v", scontainer.ID(), err)
 	}
 
 	if err := sb.SetInfraContainer(scontainer); err != nil {
-		return err
-	}
-
-	// We add an NS only if we can load a permanent one.
-	// Otherwise, the sandbox will live in the host namespace.
-	if c.config.ManageNSLifecycle || wasSpoofed {
-		namespacesToJoin := []struct {
-			rspecNS  rspec.LinuxNamespaceType
-			joinFunc func(string) error
-		}{
-			{rspecNS: rspec.NetworkNamespace, joinFunc: sb.NetNsJoin},
-			{rspecNS: rspec.IPCNamespace, joinFunc: sb.IpcNsJoin},
-			{rspecNS: rspec.UTSNamespace, joinFunc: sb.UtsNsJoin},
-			{rspecNS: rspec.UserNamespace, joinFunc: sb.UserNsJoin},
-		}
-		for _, namespaceToJoin := range namespacesToJoin {
-			path, err := configNsPath(&m, namespaceToJoin.rspecNS)
-			if err == nil {
-				if nsErr := namespaceToJoin.joinFunc(path); err != nil {
-					return nsErr
-				}
-			}
-		}
+		return sb, err
 	}
 
 	sb.SetCreated()
 	if err := label.ReserveLabel(processLabel); err != nil {
-		return err
+		return sb, err
 	}
 
 	sb.RestoreStopped()
 
 	if err := c.ctrIDIndex.Add(scontainer.ID()); err != nil {
-		return err
+		return sb, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -354,9 +323,9 @@ func (c *ContainerServer) LoadSandbox(id string) (retErr error) {
 		}
 	}()
 	if err := c.podIDIndex.Add(id); err != nil {
-		return err
+		return sb, err
 	}
-	return nil
+	return sb, nil
 }
 
 func configNsPath(spec *rspec.Spec, nsType rspec.LinuxNamespaceType) (string, error) {
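The refactor above collapses four copy-pasted namespace-join blocks into a table of (namespace type, join function) pairs, drops the duplicated second join section, and in passing fixes the old bug of testing `err` where `nsErr` was meant. Below is a self-contained sketch of the pattern using simplified stand-in types (not CRI-O's real ones); `lookupPath` is a hypothetical stand-in for `configNsPath`.

```go
package main

import (
	"errors"
	"fmt"
)

type nsType string

// lookupPath stands in for configNsPath: it fails when the spec has no
// pinned path for this namespace, in which case joining is skipped.
func lookupPath(ns nsType) (string, error) {
	pinned := map[nsType]string{"network": "/proc/1/ns/net", "ipc": "/proc/1/ns/ipc"}
	if p, ok := pinned[ns]; ok {
		return p, nil
	}
	return "", errors.New("no pinned path")
}

func joinAll(join map[nsType]func(string) error) error {
	for _, ns := range []nsType{"network", "ipc", "uts", "user"} {
		path, err := lookupPath(ns)
		if err != nil {
			continue // no permanent namespace: stay in the host namespace
		}
		// Check nsErr, not err -- the exact mistake the old code made.
		if nsErr := join[ns](path); nsErr != nil {
			return nsErr
		}
	}
	return nil
}

func main() {
	join := map[nsType]func(string) error{}
	for _, ns := range []nsType{"network", "ipc", "uts", "user"} {
		ns := ns
		join[ns] = func(p string) error {
			fmt.Printf("joining %s namespace at %s\n", ns, p)
			return nil
		}
	}
	fmt.Println("join error:", joinAll(join))
}
```

The table-driven form means adding a fifth namespace is a one-line change to the table rather than another six-line block, and the error handling is written once instead of four times.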